impl Op<&Self> for Simd<T, _>

Instead of implementing each "deref" pattern for every single scalar,
we can use type parameters for Simd operating on &Self.
We still use a macro, but keep it cleaner and more explicit.
Jubilee Young 2021-11-23 16:15:19 -08:00
parent b8d6b68446
commit 0a6992f5bf
2 changed files with 76 additions and 56 deletions
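
As a rough usage sketch of what the new impls enable (not part of the commit; it assumes the nightly portable-SIMD API with Simd::from_array and to_array, which at the time lived in the core_simd crate), a by-value LHS can now take a reference RHS for any element type:

#![feature(portable_simd)]
use std::simd::Simd;

fn main() {
    let a = Simd::from_array([1.0f32, 2.0, 3.0, 4.0]);
    let b = Simd::from_array([10.0f32, 20.0, 30.0, 40.0]);

    // Resolves via the generic `Add<&Self> for Simd<T, LANES>` impl,
    // which derefs the RHS and forwards to the by-value `Add<Self>`.
    let sum = a + &b;
    assert_eq!(sum.to_array(), [11.0, 22.0, 33.0, 44.0]);

    // One macro invocation covers the other binary ops for every element
    // type as well, e.g. the bit ops on integers.
    let x = Simd::from_array([0b1100u8; 4]);
    let y = Simd::from_array([0b1010u8; 4]);
    assert_eq!((x & &y).to_array(), [0b1000u8; 4]);
}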


@@ -1,5 +1,11 @@
use crate::simd::intrinsics;
use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
use core::ops::{Add, Mul};
use core::ops::{BitAnd, BitOr, BitXor};
use core::ops::{Div, Rem, Sub};
use core::ops::{Shl, Shr};
mod deref;
impl<I, T, const LANES: usize> core::ops::Index<I> for Simd<T, LANES>
where
@@ -57,42 +63,6 @@ macro_rules! impl_ref_ops {
$(#[$attrs])*
fn $fn($self_tok, $rhs_arg: $rhs_arg_ty) -> Self::Output $body
}
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
where
LaneCount<$lanes2>: SupportedLaneCount,
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
$(#[$attrs])*
fn $fn($self_tok, $rhs_arg: &$rhs) -> Self::Output {
core::ops::$trait::$fn($self_tok, *$rhs_arg)
}
}
impl<const $lanes: usize> core::ops::$trait<$rhs> for &'_ $type
where
LaneCount<$lanes2>: SupportedLaneCount,
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
$(#[$attrs])*
fn $fn($self_tok, $rhs_arg: $rhs) -> Self::Output {
core::ops::$trait::$fn(*$self_tok, $rhs_arg)
}
}
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for &'_ $type
where
LaneCount<$lanes2>: SupportedLaneCount,
{
type Output = <$type as core::ops::$trait<$rhs>>::Output;
$(#[$attrs])*
fn $fn($self_tok, $rhs_arg: &$rhs) -> Self::Output {
core::ops::$trait::$fn(*$self_tok, *$rhs_arg)
}
}
};
// binary assignment op
@@ -112,16 +82,6 @@ macro_rules! impl_ref_ops {
$(#[$attrs])*
fn $fn(&mut $self_tok, $rhs_arg: $rhs_arg_ty) $body
}
impl<const $lanes: usize> core::ops::$trait<&'_ $rhs> for $type
where
LaneCount<$lanes2>: SupportedLaneCount,
{
$(#[$attrs])*
fn $fn(&mut $self_tok, $rhs_arg: &$rhs_arg_ty) {
core::ops::$trait::$fn($self_tok, *$rhs_arg)
}
}
};
// unary op
@@ -141,16 +101,6 @@ macro_rules! impl_ref_ops {
type Output = $output;
fn $fn($self_tok) -> Self::Output $body
}
impl<const $lanes: usize> core::ops::$trait for &'_ $type
where
LaneCount<$lanes2>: SupportedLaneCount,
{
type Output = <$type as core::ops::$trait>::Output;
fn $fn($self_tok) -> Self::Output {
core::ops::$trait::$fn(*$self_tok)
}
}
}
}


@@ -0,0 +1,70 @@
//! This module hacks in "implicit deref" for Simd's operators.
//! Ideally, Rust would take care of this itself,
//! and method calls usually handle the LHS implicitly.
//! So, we'll manually deref the RHS.
use super::*;
macro_rules! deref_ops {
($(impl<T, const LANES: usize> $trait:ident<&Self> for Simd<T, LANES> {
fn $call:ident(rhs: &Self)
})*) => {
$(impl<T, const LANES: usize> $trait<&Self> for Simd<T, LANES>
where
Self: $trait<Self, Output = Self>,
T: SimdElement,
LaneCount<LANES>: SupportedLaneCount,
{
type Output = Self;
#[inline]
#[must_use = "operator returns a new vector without mutating the inputs"]
fn $call(self, rhs: &Self) -> Self::Output {
self.$call(*rhs)
}
})*
}
}
deref_ops! {
// Arithmetic
impl<T, const LANES: usize> Add<&Self> for Simd<T, LANES> {
fn add(rhs: &Self)
}
impl<T, const LANES: usize> Mul<&Self> for Simd<T, LANES> {
fn mul(rhs: &Self)
}
impl<T, const LANES: usize> Sub<&Self> for Simd<T, LANES> {
fn sub(rhs: &Self)
}
impl<T, const LANES: usize> Div<&Self> for Simd<T, LANES> {
fn div(rhs: &Self)
}
impl<T, const LANES: usize> Rem<&Self> for Simd<T, LANES> {
fn rem(rhs: &Self)
}
// Bitops
impl<T, const LANES: usize> BitAnd<&Self> for Simd<T, LANES> {
fn bitand(rhs: &Self)
}
impl<T, const LANES: usize> BitOr<&Self> for Simd<T, LANES> {
fn bitor(rhs: &Self)
}
impl<T, const LANES: usize> BitXor<&Self> for Simd<T, LANES> {
fn bitxor(rhs: &Self)
}
impl<T, const LANES: usize> Shl<&Self> for Simd<T, LANES> {
fn shl(rhs: &Self)
}
impl<T, const LANES: usize> Shr<&Self> for Simd<T, LANES> {
fn shr(rhs: &Self)
}
}
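
For a self-contained illustration of the pattern the macro generates, here is the same forwarding impl written out for a hypothetical Vector type (a sketch, not code from this commit): one generic impl, bounded on the existing by-value impl, copies out of the reference and forwards.

use core::ops::Add;

// Hypothetical stand-in for Simd; not part of the commit.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Vector<const LANES: usize>([f32; LANES]);

// Ordinary by-value impl, analogous to what ops.rs already provides.
impl<const LANES: usize> Add for Vector<LANES> {
    type Output = Self;
    fn add(self, rhs: Self) -> Self {
        let mut out = self.0;
        for (l, r) in out.iter_mut().zip(rhs.0) {
            *l += r;
        }
        Vector(out)
    }
}

// The "deref" impl: accept &Self on the right, copy out of the reference
// (the type is Copy), and forward to the by-value impl above.
impl<const LANES: usize> Add<&Self> for Vector<LANES>
where
    Self: Add<Self, Output = Self>,
{
    type Output = Self;
    #[inline]
    fn add(self, rhs: &Self) -> Self::Output {
        self.add(*rhs)
    }
}

fn main() {
    let a = Vector([1.0, 2.0]);
    let b = Vector([3.0, 4.0]);
    assert_eq!(a + &b, Vector([4.0, 6.0]));
}

Because Simd is Copy, *rhs is a cheap copy, so the forwarding impl costs nothing; the removed impl_ref_ops! arms generated the same kind of forwarding impls separately for every concrete vector type.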