// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// fixedpoint_sse.h: optimized SSE specializations of the templates
// in fixedpoint.h.

#ifndef GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_
#define GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_

#include <smmintrin.h>

#include <cstdint>
#include <limits>

#include "fixedpoint.h"

namespace gemmlowp {

// SSE intrinsics are not finely typed: there is a single __m128i vector
// type that does not distinguish between "int32x4" and "int16x8" use
// cases, unlike the NEON equivalents. Because we had initially focused on
// int32x4, we specialized these fixedpoint templates directly for __m128i,
// hardcoding the int32x4 semantics and leaving no room for int16x8
// semantics. We amend that by adding a separate data type, int16x8_m128i,
// that wraps __m128i while being a distinct type.
struct int16x8_m128i {
  int16x8_m128i() {}
  explicit int16x8_m128i(__m128i w) : v(w) {}
  ~int16x8_m128i() {}

  __m128i v;
};

template <>
struct FixedPointRawTypeTraits<__m128i> {
  typedef std::int32_t ScalarRawType;
  static constexpr int kLanes = 4;
};

template <>
struct FixedPointRawTypeTraits<int16x8_m128i> {
  typedef std::int16_t ScalarRawType;
  static constexpr int kLanes = 8;
};
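
// These traits describe each raw SIMD register type to the generic
// fixed-point code in fixedpoint.h: __m128i is treated as 4 lanes of int32
// and int16x8_m128i as 8 lanes of int16. Illustration (not part of the
// original header): with the specializations above,
//   FixedPointRawTypeTraits<__m128i>::kLanes == 4
//   sizeof(FixedPointRawTypeTraits<__m128i>::ScalarRawType) == 4  // bytes
// together describe the full 16-byte register.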

template <>
inline __m128i BitAnd(__m128i a, __m128i b) {
  return _mm_and_si128(a, b);
}

template <>
inline int16x8_m128i BitAnd(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_and_si128(a.v, b.v));
}

template <>
inline __m128i BitOr(__m128i a, __m128i b) {
  return _mm_or_si128(a, b);
}

template <>
inline int16x8_m128i BitOr(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_or_si128(a.v, b.v));
}

template <>
inline __m128i BitXor(__m128i a, __m128i b) {
  return _mm_xor_si128(a, b);
}

template <>
inline int16x8_m128i BitXor(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_xor_si128(a.v, b.v));
}

template <>
inline __m128i BitNot(__m128i a) {
  return _mm_andnot_si128(a, _mm_set1_epi32(-1));
}

template <>
inline int16x8_m128i BitNot(int16x8_m128i a) {
  return int16x8_m128i(_mm_andnot_si128(a.v, _mm_set1_epi16(-1)));
}
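
// SSE has no single-operand bitwise NOT, so BitNot is expressed as
// andnot(a, all_ones) == ~a & 0xFF...FF == ~a.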

template <>
inline __m128i Add(__m128i a, __m128i b) {
  return _mm_add_epi32(a, b);
}

template <>
inline int16x8_m128i Add(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_add_epi16(a.v, b.v));
}

template <>
inline __m128i Mul(__m128i a, __m128i b) {
  return _mm_mullo_epi32(a, b);
}

template <>
inline int16x8_m128i Mul(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_mullo_epi16(a.v, b.v));
}

template <>
inline __m128i Sub(__m128i a, __m128i b) {
  return _mm_sub_epi32(a, b);
}

template <>
inline int16x8_m128i Sub(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_sub_epi16(a.v, b.v));
}

template <>
inline __m128i Neg(__m128i a) {
  return _mm_sign_epi32(a, _mm_set1_epi32(-1));
}

template <>
inline int16x8_m128i Neg(int16x8_m128i a) {
  return int16x8_m128i(_mm_sign_epi16(a.v, _mm_set1_epi16(-1)));
}
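
// Neg uses the SSSE3 sign instructions with an all-(-1) second operand:
// _mm_sign_epi32(a, -1) negates every lane of a (and, as with scalar
// two's-complement negation, the most negative value maps to itself).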

template <>
inline __m128i ShiftLeft(__m128i a, int offset) {
  return _mm_slli_epi32(a, offset);
}

template <>
inline int16x8_m128i ShiftLeft(int16x8_m128i a, int offset) {
  return int16x8_m128i(_mm_slli_epi16(a.v, offset));
}

template <>
inline __m128i ShiftRight(__m128i a, int offset) {
  return _mm_srai_epi32(a, offset);
}

template <>
inline int16x8_m128i ShiftRight(int16x8_m128i a, int offset) {
  return int16x8_m128i(_mm_srai_epi16(a.v, offset));
}

template <>
inline __m128i SelectUsingMask(__m128i if_mask, __m128i then_val,
                               __m128i else_val) {
  // borrowed from Intel's arm_neon_sse.h header.
  return _mm_or_si128(_mm_and_si128(if_mask, then_val),
                      _mm_andnot_si128(if_mask, else_val));
}

template <>
inline int16x8_m128i SelectUsingMask(int16x8_m128i if_mask,
                                     int16x8_m128i then_val,
                                     int16x8_m128i else_val) {
  // borrowed from Intel's arm_neon_sse.h header.
  return int16x8_m128i(SelectUsingMask(if_mask.v, then_val.v, else_val.v));
}
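
// SelectUsingMask is a plain bitwise select:
//   (if_mask & then_val) | (~if_mask & else_val)
// It relies on if_mask being all-ones in "true" lanes and all-zeroes in
// "false" lanes, which is exactly what the MaskIf* functions below produce.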

template <>
inline __m128i MaskIfEqual(__m128i a, __m128i b) {
  return _mm_cmpeq_epi32(a, b);
}

template <>
inline int16x8_m128i MaskIfEqual(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_cmpeq_epi16(a.v, b.v));
}

template <>
inline __m128i MaskIfNotEqual(__m128i a, __m128i b) {
  return BitNot(MaskIfEqual(a, b));
}

template <>
inline int16x8_m128i MaskIfNotEqual(int16x8_m128i a, int16x8_m128i b) {
  return BitNot(MaskIfEqual(a, b));
}

template <>
inline __m128i MaskIfZero(__m128i a) {
  return MaskIfEqual(a, _mm_set1_epi32(0));
}

template <>
inline int16x8_m128i MaskIfZero(int16x8_m128i a) {
  return MaskIfEqual(a, int16x8_m128i(_mm_set1_epi16(0)));
}

template <>
inline __m128i MaskIfNonZero(__m128i a) {
  return MaskIfNotEqual(a, _mm_set1_epi32(0));
}

template <>
inline int16x8_m128i MaskIfNonZero(int16x8_m128i a) {
  return MaskIfNotEqual(a, int16x8_m128i(_mm_set1_epi16(0)));
}

template <>
inline __m128i MaskIfGreaterThan(__m128i a, __m128i b) {
  return _mm_cmpgt_epi32(a, b);
}

template <>
inline int16x8_m128i MaskIfGreaterThan(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_cmpgt_epi16(a.v, b.v));
}

template <>
inline __m128i MaskIfLessThan(__m128i a, __m128i b) {
  return _mm_cmplt_epi32(a, b);
}

template <>
inline int16x8_m128i MaskIfLessThan(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_cmplt_epi16(a.v, b.v));
}

template <>
inline __m128i MaskIfGreaterThanOrEqual(__m128i a, __m128i b) {
  return BitNot(MaskIfLessThan(a, b));
}

template <>
inline int16x8_m128i MaskIfGreaterThanOrEqual(int16x8_m128i a,
                                              int16x8_m128i b) {
  return BitNot(MaskIfLessThan(a, b));
}

template <>
inline __m128i MaskIfLessThanOrEqual(__m128i a, __m128i b) {
  return BitNot(MaskIfGreaterThan(a, b));
}

template <>
inline int16x8_m128i MaskIfLessThanOrEqual(int16x8_m128i a, int16x8_m128i b) {
  return BitNot(MaskIfGreaterThan(a, b));
}

/* Assumptions:
   - All and Any are used on masks.
   - Masks are all_ones for true lanes, all_zeroes otherwise.
   Hence, All means all 128 bits set, and Any means any bit set.
*/
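
// The PTEST-based implementations below rely on:
//   _mm_testz_si128(a, b) == 1  iff  (a & b) == 0
//   _mm_testc_si128(a, b) == 1  iff  (~a & b) == 0
// Hence Any(a) is "some bit of a is set", and All(a) tests a against an
// all-ones operand so that it is true only when every bit of a is set.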

template <>
inline bool All(__m128i a) {
  return _mm_testc_si128(a, _mm_set1_epi32(-1));
}

template <>
inline bool All(int16x8_m128i a) {
  return _mm_testc_si128(a.v, _mm_set1_epi16(-1));
}

template <>
inline bool Any(__m128i a) {
  return !_mm_testz_si128(a, a);
}

template <>
inline bool Any(int16x8_m128i a) {
  return !_mm_testz_si128(a.v, a.v);
}

template <>
inline __m128i RoundingHalfSum(__m128i a, __m128i b) {
  /* Alternative: divide the inputs before the add, to avoid the overflow
     and the costly test for whether an overflow occurred on the signed add:
       __m128i round_bit_mask, a_over_2, b_over_2, round_bit, sum;
       round_bit_mask = _mm_set1_epi32(1);
       a_over_2 = _mm_srai_epi32(a, 1);
       b_over_2 = _mm_srai_epi32(b, 1);
       sum = Add(a_over_2, b_over_2);
       round_bit = _mm_sign_epi32(BitAnd(BitOr(a, b), round_bit_mask), sum);
       return Add(sum, round_bit);
     The implementation below instead detects the overflow and xors the sign
     bit if an overflow happened. */
  __m128i one, sign_bit_mask, sum, rounded_half_sum, overflow, result;
  one = _mm_set1_epi32(1);
  sign_bit_mask = _mm_set1_epi32(0x80000000);
  sum = Add(a, b);
  rounded_half_sum = _mm_srai_epi32(Add(sum, one), 1);
  overflow =
      BitAnd(BitAnd(BitXor(a, rounded_half_sum), BitXor(b, rounded_half_sum)),
             sign_bit_mask);
  result = BitXor(rounded_half_sum, overflow);
  return result;
}
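
// Lane-wise, the code above computes the rounding average (a + b + 1) >> 1.
// When a + b overflows 32 bits, the wrapped rounded_half_sum differs from
// the exact result only in its sign bit; the overflow mask detects that case
// from the sign bits of a, b and rounded_half_sum, and the final BitXor
// flips the sign bit back. Worked example for one lane, a = b = 0x7fffffff:
//   sum wraps to 0xfffffffe, rounded_half_sum = 0xffffffff,
//   overflow = 0x80000000, result = 0x7fffffff, which is the exact
//   (a + b + 1) >> 1.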

template <>
inline int16x8_m128i RoundingHalfSum(int16x8_m128i a, int16x8_m128i b) {
  // Idea: go to unsigned to use _mm_avg_epu16,
  // borrowed from Intel's arm_neon_sse.h header.
  __m128i constant_neg_32768 = _mm_set1_epi16(-32768);
  __m128i a_unsigned = _mm_sub_epi16(a.v, constant_neg_32768);
  __m128i b_unsigned = _mm_sub_epi16(b.v, constant_neg_32768);
  __m128i avg_unsigned = _mm_avg_epu16(a_unsigned, b_unsigned);
  __m128i avg = _mm_add_epi16(avg_unsigned, constant_neg_32768);
  return int16x8_m128i(avg);
}
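
// Subtracting -32768 biases each int16 lane into the uint16 range while
// preserving order and averages; _mm_avg_epu16 then computes the exact
// unsigned (x + y + 1) >> 1 (it uses a 17-bit intermediate, so it cannot
// overflow), and adding -32768 back removes the bias. E.g. for one lane,
// a = -1, b = 2: the biased values are 32767 and 32770, their rounding
// average is 32769, and unbiasing gives 1 = (-1 + 2 + 1) >> 1.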

template <>
inline __m128i SaturatingRoundingDoublingHighMul(__m128i a, __m128i b) {
  __m128i min, max, saturation_mask, a0_a2, a1_a3, b0_b2, b1_b3;
  __m128i a0b0_a2b2, a1b1_a3b3, a0b0_a2b2_rounded, a1b1_a3b3_rounded;
  __m128i a0b0_a2b2_rounded_2x, a1b1_a3b3_rounded_2x, result;
  __m128i nudge;
  // saturation only happens if a == b == INT_MIN
  min = _mm_set1_epi32(std::numeric_limits<std::int32_t>::min());
  max = _mm_set1_epi32(std::numeric_limits<std::int32_t>::max());
  saturation_mask = BitAnd(MaskIfEqual(a, b), MaskIfEqual(a, min));
  // a = a0 | a1 | a2 | a3
  // b = b0 | b1 | b2 | b3
  a0_a2 = a;
  a1_a3 = _mm_srli_si128(a, 4);
  b0_b2 = b;
  b1_b3 = _mm_srli_si128(b, 4);
  a0b0_a2b2 = _mm_mul_epi32(a0_a2, b0_b2);
  a1b1_a3b3 = _mm_mul_epi32(a1_a3, b1_b3);
  // do the rounding and take into account that it will be doubled
  nudge = _mm_set1_epi64x(1 << 30);
  a0b0_a2b2_rounded = _mm_add_epi64(a0b0_a2b2, nudge);
  a1b1_a3b3_rounded = _mm_add_epi64(a1b1_a3b3, nudge);
  // do the doubling
  a0b0_a2b2_rounded_2x = _mm_slli_epi64(a0b0_a2b2_rounded, 1);
  a1b1_a3b3_rounded_2x = _mm_slli_epi64(a1b1_a3b3_rounded, 1);
  // get the high part of the products
  result = _mm_blend_epi16(_mm_srli_si128(a0b0_a2b2_rounded_2x, 4),
                           a1b1_a3b3_rounded_2x, 0xcc);
  // saturate the lanes that overflowed: the INT_MIN * INT_MIN case must
  // clamp to INT_MAX, consistent with the int16 specialization below
  return SelectUsingMask(saturation_mask, max, result);
}
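
// Lane-wise, the block above computes the high 32 bits of 2 * a * b with
// rounding, i.e. the rounded value of a * b / 2^31: _mm_mul_epi32 yields the
// full 64-bit products of the even lanes (the odd lanes are brought into
// even position by the 4-byte shifts), the 1 << 30 nudge implements the
// rounding once the product is doubled, and the blend re-interleaves the
// four high halves into a single int32x4 result.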

template <>
inline int16x8_m128i SaturatingRoundingDoublingHighMul(int16x8_m128i a,
                                                       int16x8_m128i b) {
  // Idea: use _mm_mulhrs_epi16 then saturate with a bit-operation,
  // borrowed from Intel's arm_neon_sse.h header.
  __m128i result_unsaturated = _mm_mulhrs_epi16(a.v, b.v);
  __m128i saturation_mask =
      _mm_cmpeq_epi16(result_unsaturated, _mm_set1_epi16(0x8000));
  __m128i result = _mm_xor_si128(result_unsaturated, saturation_mask);
  return int16x8_m128i(result);
}
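
// _mm_mulhrs_epi16 already computes the rounded high half of the doubled
// 16 x 16 -> 32 product, which is exactly this operation except in the one
// overflowing case, -32768 * -32768, which yields -32768 (0x8000). The
// compare produces an all-ones mask in exactly those lanes, and xoring
// 0x8000 with 0xffff turns it into 0x7fff, i.e. saturation to the int16
// maximum.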

template <>
inline __m128i Dup<__m128i>(std::int32_t x) {
  return _mm_set1_epi32(x);
}

template <>
inline int16x8_m128i Dup<int16x8_m128i>(std::int16_t x) {
  return int16x8_m128i(_mm_set1_epi16(x));
}
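
// Dup broadcasts a scalar to every lane. As an illustrative composition of
// the primitives in this header (not code that ships with it):
//   __m128i avg = RoundingHalfSum(Dup<__m128i>(3), Dup<__m128i>(8));
// leaves every int32 lane equal to (3 + 8 + 1) >> 1 == 6.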

// So far this is only needed for int16.
template <>
inline int16x8_m128i SaturatingAdd(int16x8_m128i a, int16x8_m128i b) {
  return int16x8_m128i(_mm_adds_epi16(a.v, b.v));
}
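
// _mm_adds_epi16 clamps each lane of the sum to [-32768, 32767] instead of
// wrapping, which is the saturating-add behavior this specialization needs.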

}  // end namespace gemmlowp

#endif  // GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_