☰
In core::arch::wasm32
Structs
v128
Functions
f32x4
f32x4_abs
f32x4_add
f32x4_ceil
f32x4_convert_i32x4
f32x4_convert_u32x4
f32x4_demote_f64x2_zero
f32x4_div
f32x4_eq
f32x4_extract_lane
f32x4_floor
f32x4_ge
f32x4_gt
f32x4_le
f32x4_lt
f32x4_max
f32x4_min
f32x4_mul
f32x4_ne
f32x4_nearest
f32x4_neg
f32x4_pmax
f32x4_pmin
f32x4_replace_lane
f32x4_splat
f32x4_sqrt
f32x4_sub
f32x4_trunc
f64x2
f64x2_abs
f64x2_add
f64x2_ceil
f64x2_convert_low_i32x4
f64x2_convert_low_u32x4
f64x2_div
f64x2_eq
f64x2_extract_lane
f64x2_floor
f64x2_ge
f64x2_gt
f64x2_le
f64x2_lt
f64x2_max
f64x2_min
f64x2_mul
f64x2_ne
f64x2_nearest
f64x2_neg
f64x2_pmax
f64x2_pmin
f64x2_promote_low_f32x4
f64x2_replace_lane
f64x2_splat
f64x2_sqrt
f64x2_sub
f64x2_trunc
i16x8
i16x8_abs
i16x8_add
i16x8_add_sat
i16x8_all_true
i16x8_bitmask
i16x8_eq
i16x8_extadd_pairwise_i8x16
i16x8_extadd_pairwise_u8x16
i16x8_extend_high_i8x16
i16x8_extend_high_u8x16
i16x8_extend_low_i8x16
i16x8_extend_low_u8x16
i16x8_extmul_high_i8x16
i16x8_extmul_high_u8x16
i16x8_extmul_low_i8x16
i16x8_extmul_low_u8x16
i16x8_extract_lane
i16x8_ge
i16x8_gt
i16x8_le
i16x8_load_extend_i8x8
i16x8_load_extend_u8x8
i16x8_lt
i16x8_max
i16x8_min
i16x8_mul
i16x8_narrow_i32x4
i16x8_ne
i16x8_neg
i16x8_q15mulr_sat
i16x8_replace_lane
i16x8_shl
i16x8_shr
i16x8_shuffle
i16x8_splat
i16x8_sub
i16x8_sub_sat
i32x4
i32x4_abs
i32x4_add
i32x4_all_true
i32x4_bitmask
i32x4_dot_i16x8
i32x4_eq
i32x4_extadd_pairwise_i16x8
i32x4_extadd_pairwise_u16x8
i32x4_extend_high_i16x8
i32x4_extend_high_u16x8
i32x4_extend_low_i16x8
i32x4_extend_low_u16x8
i32x4_extmul_high_i16x8
i32x4_extmul_high_u16x8
i32x4_extmul_low_i16x8
i32x4_extmul_low_u16x8
i32x4_extract_lane
i32x4_ge
i32x4_gt
i32x4_le
i32x4_load_extend_i16x4
i32x4_load_extend_u16x4
i32x4_lt
i32x4_max
i32x4_min
i32x4_mul
i32x4_ne
i32x4_neg
i32x4_replace_lane
i32x4_shl
i32x4_shr
i32x4_shuffle
i32x4_splat
i32x4_sub
i32x4_trunc_sat_f32x4
i32x4_trunc_sat_f64x2_zero
i64x2
i64x2_abs
i64x2_add
i64x2_all_true
i64x2_bitmask
i64x2_eq
i64x2_extend_high_i32x4
i64x2_extend_high_u32x4
i64x2_extend_low_i32x4
i64x2_extend_low_u32x4
i64x2_extmul_high_i32x4
i64x2_extmul_high_u32x4
i64x2_extmul_low_i32x4
i64x2_extmul_low_u32x4
i64x2_extract_lane
i64x2_ge
i64x2_gt
i64x2_le
i64x2_load_extend_i32x2
i64x2_load_extend_u32x2
i64x2_lt
i64x2_mul
i64x2_ne
i64x2_neg
i64x2_replace_lane
i64x2_shl
i64x2_shr
i64x2_shuffle
i64x2_splat
i64x2_sub
i8x16
i8x16_abs
i8x16_add
i8x16_add_sat
i8x16_all_true
i8x16_bitmask
i8x16_eq
i8x16_extract_lane
i8x16_ge
i8x16_gt
i8x16_le
i8x16_lt
i8x16_max
i8x16_min
i8x16_narrow_i16x8
i8x16_ne
i8x16_neg
i8x16_popcnt
i8x16_replace_lane
i8x16_shl
i8x16_shr
i8x16_shuffle
i8x16_splat
i8x16_sub
i8x16_sub_sat
i8x16_swizzle
memory_atomic_notify
memory_atomic_wait32
memory_atomic_wait64
memory_grow
memory_size
u16x8
u16x8_add
u16x8_add_sat
u16x8_all_true
u16x8_avgr
u16x8_bitmask
u16x8_eq
u16x8_extadd_pairwise_u8x16
u16x8_extend_high_u8x16
u16x8_extend_low_u8x16
u16x8_extmul_high_u8x16
u16x8_extmul_low_u8x16
u16x8_extract_lane
u16x8_ge
u16x8_gt
u16x8_le
u16x8_load_extend_u8x8
u16x8_lt
u16x8_max
u16x8_min
u16x8_mul
u16x8_narrow_i32x4
u16x8_ne
u16x8_replace_lane
u16x8_shl
u16x8_shr
u16x8_shuffle
u16x8_splat
u16x8_sub
u16x8_sub_sat
u32x4
u32x4_add
u32x4_all_true
u32x4_bitmask
u32x4_eq
u32x4_extadd_pairwise_u16x8
u32x4_extend_high_u16x8
u32x4_extend_low_u16x8
u32x4_extmul_high_u16x8
u32x4_extmul_low_u16x8
u32x4_extract_lane
u32x4_ge
u32x4_gt
u32x4_le
u32x4_load_extend_u16x4
u32x4_lt
u32x4_max
u32x4_min
u32x4_mul
u32x4_ne
u32x4_replace_lane
u32x4_shl
u32x4_shr
u32x4_shuffle
u32x4_splat
u32x4_sub
u32x4_trunc_sat_f32x4
u32x4_trunc_sat_f64x2_zero
u64x2
u64x2_add
u64x2_all_true
u64x2_bitmask
u64x2_eq
u64x2_extend_high_u32x4
u64x2_extend_low_u32x4
u64x2_extmul_high_u32x4
u64x2_extmul_low_u32x4
u64x2_extract_lane
u64x2_load_extend_u32x2
u64x2_mul
u64x2_ne
u64x2_replace_lane
u64x2_shl
u64x2_shr
u64x2_shuffle
u64x2_splat
u64x2_sub
u8x16
u8x16_add
u8x16_add_sat
u8x16_all_true
u8x16_avgr
u8x16_bitmask
u8x16_eq
u8x16_extract_lane
u8x16_ge
u8x16_gt
u8x16_le
u8x16_lt
u8x16_max
u8x16_min
u8x16_narrow_i16x8
u8x16_ne
u8x16_popcnt
u8x16_replace_lane
u8x16_shl
u8x16_shr
u8x16_shuffle
u8x16_splat
u8x16_sub
u8x16_sub_sat
u8x16_swizzle
unreachable
v128_and
v128_andnot
v128_any_true
v128_bitselect
v128_load
v128_load16_lane
v128_load16_splat
v128_load32_lane
v128_load32_splat
v128_load32_zero
v128_load64_lane
v128_load64_splat
v128_load64_zero
v128_load8_lane
v128_load8_splat
v128_not
v128_or
v128_store
v128_store16_lane
v128_store32_lane
v128_store64_lane
v128_store8_lane
v128_xor
?
Function
core
::
arch
::
wasm32
::
i16x8_abs
1.54.0
·
source
·
[
−
]
pub fn i16x8_abs(a: v128) -> v128
Available on target_family="wasm" and target feature simd128 and WebAssembly only.
Expand description
Lane-wise wrapping absolute value.