typed_floats/types/impls/ord.rs
#![allow(clippy::comparison_chain)]

use crate::{
    Negative, NegativeFinite, NonNaN, NonNaNFinite, NonZeroNonNaN, NonZeroNonNaNFinite, Positive,
    PositiveFinite, StrictlyNegative, StrictlyNegativeFinite, StrictlyPositive,
    StrictlyPositiveFinite,
};

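// Total order for the NaN-free types: with NaN ruled out, every pair of values
// is comparable, so the `<` / `==` / else chain below is exhaustive and `Ord`
// is sound.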
macro_rules! impl_ord {
    ($type:ident) => {
        impl Ord for $type<f32> {
            #[inline]
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                #[allow(clippy::float_cmp)]
                if self.get() < other.get() {
                    core::cmp::Ordering::Less
                } else if self.get() == other.get() {
                    core::cmp::Ordering::Equal
                } else {
                    core::cmp::Ordering::Greater
                }
            }
        }

        impl Ord for $type<f64> {
            #[inline]
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                #[allow(clippy::float_cmp)]
                if self.get() < other.get() {
                    core::cmp::Ordering::Less
                } else if self.get() == other.get() {
                    core::cmp::Ordering::Equal
                } else {
                    core::cmp::Ordering::Greater
                }
            }
        }
    };
}

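// Faster total order via raw bit patterns: IEEE 754 values whose sign bit is
// clear compare the same way as their `to_bits()` representations, so for the
// positive types an unsigned integer comparison stands in for the float one.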
macro_rules! impl_fast_ord {
    ($type:ident) => {
        impl Ord for $type<f32> {
            #[inline]
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                self.get().to_bits().cmp(&other.get().to_bits())
            }
        }

        impl Ord for $type<f64> {
            #[inline]
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                self.get().to_bits().cmp(&other.get().to_bits())
            }
        }
    };
}

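// Inverted bit comparison for the negative types: with the sign bit set, a
// larger bit pattern encodes a more negative value, so swapping the operands
// restores the numeric order.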
macro_rules! impl_fast_inv_ord {
    ($type:ident) => {
        impl Ord for $type<f32> {
            #[inline]
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                other.get().to_bits().cmp(&self.get().to_bits())
            }
        }

        impl Ord for $type<f64> {
            #[inline]
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                other.get().to_bits().cmp(&self.get().to_bits())
            }
        }
    };
}

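// `PartialOrd` between two typed values defers to the total order above; the
// mixed impls against plain `f32` / `f64` fall back to the primitives' partial
// order and may return `None` when the raw float operand is NaN.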
macro_rules! impl_partial_ord {
    ($type:ident) => {
        impl PartialOrd for $type<f32> {
            #[inline]
            fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
                Some(self.cmp(other))
            }
        }

        impl PartialOrd<f32> for $type<f32> {
            #[inline]
            fn partial_cmp(&self, other: &f32) -> Option<core::cmp::Ordering> {
                self.get().partial_cmp(other)
            }
        }

        impl PartialOrd<$type<f32>> for f32 {
            #[inline]
            fn partial_cmp(&self, other: &$type<f32>) -> Option<core::cmp::Ordering> {
                self.partial_cmp(&other.0)
            }
        }

        impl PartialOrd for $type<f64> {
            #[inline]
            fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
                Some(self.cmp(other))
            }
        }

        impl PartialOrd<f64> for $type<f64> {
            #[inline]
            fn partial_cmp(&self, other: &f64) -> Option<core::cmp::Ordering> {
                self.get().partial_cmp(other)
            }
        }

        impl PartialOrd<$type<f64>> for f64 {
            #[inline]
            fn partial_cmp(&self, other: &$type<f64>) -> Option<core::cmp::Ordering> {
                self.partial_cmp(&other.0)
            }
        }
    };
}

impl_ord!(NonNaN);
impl_ord!(NonZeroNonNaN);
impl_ord!(NonNaNFinite);
impl_ord!(NonZeroNonNaNFinite);
impl_fast_ord!(Positive);
impl_fast_inv_ord!(Negative);
impl_fast_ord!(PositiveFinite);
impl_fast_inv_ord!(NegativeFinite);
impl_fast_ord!(StrictlyPositive);
impl_fast_inv_ord!(StrictlyNegative);
impl_fast_ord!(StrictlyPositiveFinite);
impl_fast_inv_ord!(StrictlyNegativeFinite);

impl_partial_ord!(NonNaN);
impl_partial_ord!(NonZeroNonNaN);
impl_partial_ord!(NonNaNFinite);
impl_partial_ord!(NonZeroNonNaNFinite);
impl_partial_ord!(Positive);
impl_partial_ord!(Negative);
impl_partial_ord!(PositiveFinite);
impl_partial_ord!(NegativeFinite);
impl_partial_ord!(StrictlyPositive);
impl_partial_ord!(StrictlyNegative);
impl_partial_ord!(StrictlyPositiveFinite);
impl_partial_ord!(StrictlyNegativeFinite);
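
// A minimal sketch of how the orderings above are expected to behave. It
// assumes the crate exposes `TryFrom<f64>` constructors for these types;
// adjust the constructors if the actual API differs.
#[cfg(test)]
mod ord_sketch {
    use crate::{StrictlyNegative, StrictlyPositive};
    use core::cmp::Ordering;

    #[test]
    fn bit_order_matches_numeric_order() {
        // Positive types: plain bit comparison agrees with numeric comparison.
        let one = StrictlyPositive::<f64>::try_from(1.0).unwrap();
        let two = StrictlyPositive::<f64>::try_from(2.0).unwrap();
        assert_eq!(one.cmp(&two), Ordering::Less);

        // Negative types: bit patterns order backwards, so the swapped
        // comparison still reports -2.0 < -1.0.
        let minus_one = StrictlyNegative::<f64>::try_from(-1.0).unwrap();
        let minus_two = StrictlyNegative::<f64>::try_from(-2.0).unwrap();
        assert_eq!(minus_two.cmp(&minus_one), Ordering::Less);
    }
}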