Keyboard firmwares for Atmel AVR and Cortex-M
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

arm_correlate_fast_opt_q15.c 13KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512
  1. /* ----------------------------------------------------------------------
  2. * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
  3. *
  4. * $Date: 17. January 2013
  5. * $Revision: V1.4.1
  6. *
  7. * Project: CMSIS DSP Library
  8. * Title: arm_correlate_fast_opt_q15.c
  9. *
  10. * Description: Fast Q15 Correlation.
  11. *
  12. * Target Processor: Cortex-M4/Cortex-M3
  13. *
  14. * Redistribution and use in source and binary forms, with or without
  15. * modification, are permitted provided that the following conditions
  16. * are met:
  17. * - Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * - Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in
  21. * the documentation and/or other materials provided with the
  22. * distribution.
  23. * - Neither the name of ARM LIMITED nor the names of its contributors
  24. * may be used to endorse or promote products derived from this
  25. * software without specific prior written permission.
  26. *
  27. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  30. * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  31. * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  32. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  33. * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  34. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  35. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  36. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  37. * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  38. * POSSIBILITY OF SUCH DAMAGE.
  39. * -------------------------------------------------------------------- */
  40. #include "arm_math.h"
  41. /**
  42. * @ingroup groupFilters
  43. */
  44. /**
  45. * @addtogroup Corr
  46. * @{
  47. */
  48. /**
  49. * @brief Correlation of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4.
  50. * @param[in] *pSrcA points to the first input sequence.
  51. * @param[in] srcALen length of the first input sequence.
  52. * @param[in] *pSrcB points to the second input sequence.
  53. * @param[in] srcBLen length of the second input sequence.
  54. * @param[out] *pDst points to the location where the output result is written. Length 2 * max(srcALen, srcBLen) - 1.
  55. * @param[in] *pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
  56. * @return none.
  57. *
  58. *
  59. * \par Restrictions
  60. * If the silicon does not support unaligned memory access enable the macro UNALIGNED_SUPPORT_DISABLE
  61. * In this case input, output, scratch buffers should be aligned by 32-bit
  62. *
  63. *
  64. * <b>Scaling and Overflow Behavior:</b>
  65. *
  66. * \par
  67. * This fast version uses a 32-bit accumulator with 2.30 format.
  68. * The accumulator maintains full precision of the intermediate multiplication results but provides only a single guard bit.
  69. * There is no saturation on intermediate additions.
  70. * Thus, if the accumulator overflows it wraps around and distorts the result.
  71. * The input signals should be scaled down to avoid intermediate overflows.
  72. * Scale down one of the inputs by 1/min(srcALen, srcBLen) to avoid overflow since a
  73. * maximum of min(srcALen, srcBLen) number of additions is carried internally.
  74. * The 2.30 accumulator is right shifted by 15 bits and then saturated to 1.15 format to yield the final result.
  75. *
  76. * \par
  77. * See <code>arm_correlate_q15()</code> for a slower implementation of this function which uses a 64-bit accumulator to avoid wrap around distortion.
  78. */
  79. void arm_correlate_fast_opt_q15(
  80. q15_t * pSrcA,
  81. uint32_t srcALen,
  82. q15_t * pSrcB,
  83. uint32_t srcBLen,
  84. q15_t * pDst,
  85. q15_t * pScratch)
  86. {
  87. q15_t *pIn1; /* inputA pointer */
  88. q15_t *pIn2; /* inputB pointer */
  89. q31_t acc0, acc1, acc2, acc3; /* Accumulators */
  90. q15_t *py; /* Intermediate inputB pointer */
  91. q31_t x1, x2, x3; /* temporary variables for holding input and coefficient values */
  92. uint32_t j, blkCnt, outBlockSize; /* loop counter */
  93. int32_t inc = 1; /* Destination address modifier */
  94. uint32_t tapCnt;
  95. q31_t y1, y2;
  96. q15_t *pScr; /* Intermediate pointers */
  97. q15_t *pOut = pDst; /* output pointer */
  98. #ifdef UNALIGNED_SUPPORT_DISABLE
  99. q15_t a, b;
  100. #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
  101. /* The algorithm implementation is based on the lengths of the inputs. */
  102. /* srcB is always made to slide across srcA. */
  103. /* So srcBLen is always considered as shorter or equal to srcALen */
  104. /* But CORR(x, y) is reverse of CORR(y, x) */
  105. /* So, when srcBLen > srcALen, output pointer is made to point to the end of the output buffer */
  106. /* and the destination pointer modifier, inc is set to -1 */
  107. /* If srcALen > srcBLen, zero pad has to be done to srcB to make the two inputs of same length */
  108. /* But to improve the performance,
  109. * we include zeroes in the output instead of zero padding either of the the inputs*/
  110. /* If srcALen > srcBLen,
  111. * (srcALen - srcBLen) zeroes has to included in the starting of the output buffer */
  112. /* If srcALen < srcBLen,
  113. * (srcALen - srcBLen) zeroes has to included in the ending of the output buffer */
  114. if(srcALen >= srcBLen)
  115. {
  116. /* Initialization of inputA pointer */
  117. pIn1 = (pSrcA);
  118. /* Initialization of inputB pointer */
  119. pIn2 = (pSrcB);
  120. /* Number of output samples is calculated */
  121. outBlockSize = (2u * srcALen) - 1u;
  122. /* When srcALen > srcBLen, zero padding is done to srcB
  123. * to make their lengths equal.
  124. * Instead, (outBlockSize - (srcALen + srcBLen - 1))
  125. * number of output samples are made zero */
  126. j = outBlockSize - (srcALen + (srcBLen - 1u));
  127. /* Updating the pointer position to non zero value */
  128. pOut += j;
  129. }
  130. else
  131. {
  132. /* Initialization of inputA pointer */
  133. pIn1 = (pSrcB);
  134. /* Initialization of inputB pointer */
  135. pIn2 = (pSrcA);
  136. /* srcBLen is always considered as shorter or equal to srcALen */
  137. j = srcBLen;
  138. srcBLen = srcALen;
  139. srcALen = j;
  140. /* CORR(x, y) = Reverse order(CORR(y, x)) */
  141. /* Hence set the destination pointer to point to the last output sample */
  142. pOut = pDst + ((srcALen + srcBLen) - 2u);
  143. /* Destination address modifier is set to -1 */
  144. inc = -1;
  145. }
  146. pScr = pScratch;
  147. /* Fill (srcBLen - 1u) zeros in scratch buffer */
  148. arm_fill_q15(0, pScr, (srcBLen - 1u));
  149. /* Update temporary scratch pointer */
  150. pScr += (srcBLen - 1u);
  151. #ifndef UNALIGNED_SUPPORT_DISABLE
  152. /* Copy (srcALen) samples in scratch buffer */
  153. arm_copy_q15(pIn1, pScr, srcALen);
  154. /* Update pointers */
  155. pScr += srcALen;
  156. #else
  157. /* Apply loop unrolling and do 4 Copies simultaneously. */
  158. j = srcALen >> 2u;
  159. /* First part of the processing with loop unrolling copies 4 data points at a time.
  160. ** a second loop below copies for the remaining 1 to 3 samples. */
  161. while(j > 0u)
  162. {
  163. /* copy second buffer in reversal manner */
  164. *pScr++ = *pIn1++;
  165. *pScr++ = *pIn1++;
  166. *pScr++ = *pIn1++;
  167. *pScr++ = *pIn1++;
  168. /* Decrement the loop counter */
  169. j--;
  170. }
  171. /* If the count is not a multiple of 4, copy remaining samples here.
  172. ** No loop unrolling is used. */
  173. j = srcALen % 0x4u;
  174. while(j > 0u)
  175. {
  176. /* copy second buffer in reversal manner for remaining samples */
  177. *pScr++ = *pIn1++;
  178. /* Decrement the loop counter */
  179. j--;
  180. }
  181. #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
  182. #ifndef UNALIGNED_SUPPORT_DISABLE
  183. /* Fill (srcBLen - 1u) zeros at end of scratch buffer */
  184. arm_fill_q15(0, pScr, (srcBLen - 1u));
  185. /* Update pointer */
  186. pScr += (srcBLen - 1u);
  187. #else
  188. /* Apply loop unrolling and do 4 Copies simultaneously. */
  189. j = (srcBLen - 1u) >> 2u;
  190. /* First part of the processing with loop unrolling copies 4 data points at a time.
  191. ** a second loop below copies for the remaining 1 to 3 samples. */
  192. while(j > 0u)
  193. {
  194. /* copy second buffer in reversal manner */
  195. *pScr++ = 0;
  196. *pScr++ = 0;
  197. *pScr++ = 0;
  198. *pScr++ = 0;
  199. /* Decrement the loop counter */
  200. j--;
  201. }
  202. /* If the count is not a multiple of 4, copy remaining samples here.
  203. ** No loop unrolling is used. */
  204. j = (srcBLen - 1u) % 0x4u;
  205. while(j > 0u)
  206. {
  207. /* copy second buffer in reversal manner for remaining samples */
  208. *pScr++ = 0;
  209. /* Decrement the loop counter */
  210. j--;
  211. }
  212. #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
  213. /* Temporary pointer for scratch2 */
  214. py = pIn2;
  215. /* Actual correlation process starts here */
  216. blkCnt = (srcALen + srcBLen - 1u) >> 2;
  217. while(blkCnt > 0)
  218. {
  219. /* Initialze temporary scratch pointer as scratch1 */
  220. pScr = pScratch;
  221. /* Clear Accumlators */
  222. acc0 = 0;
  223. acc1 = 0;
  224. acc2 = 0;
  225. acc3 = 0;
  226. /* Read four samples from scratch1 buffer */
  227. x1 = *__SIMD32(pScr)++;
  228. /* Read next four samples from scratch1 buffer */
  229. x2 = *__SIMD32(pScr)++;
  230. tapCnt = (srcBLen) >> 2u;
  231. while(tapCnt > 0u)
  232. {
  233. #ifndef UNALIGNED_SUPPORT_DISABLE
  234. /* Read four samples from smaller buffer */
  235. y1 = _SIMD32_OFFSET(pIn2);
  236. y2 = _SIMD32_OFFSET(pIn2 + 2u);
  237. acc0 = __SMLAD(x1, y1, acc0);
  238. acc2 = __SMLAD(x2, y1, acc2);
  239. #ifndef ARM_MATH_BIG_ENDIAN
  240. x3 = __PKHBT(x2, x1, 0);
  241. #else
  242. x3 = __PKHBT(x1, x2, 0);
  243. #endif
  244. acc1 = __SMLADX(x3, y1, acc1);
  245. x1 = _SIMD32_OFFSET(pScr);
  246. acc0 = __SMLAD(x2, y2, acc0);
  247. acc2 = __SMLAD(x1, y2, acc2);
  248. #ifndef ARM_MATH_BIG_ENDIAN
  249. x3 = __PKHBT(x1, x2, 0);
  250. #else
  251. x3 = __PKHBT(x2, x1, 0);
  252. #endif
  253. acc3 = __SMLADX(x3, y1, acc3);
  254. acc1 = __SMLADX(x3, y2, acc1);
  255. x2 = _SIMD32_OFFSET(pScr + 2u);
  256. #ifndef ARM_MATH_BIG_ENDIAN
  257. x3 = __PKHBT(x2, x1, 0);
  258. #else
  259. x3 = __PKHBT(x1, x2, 0);
  260. #endif
  261. acc3 = __SMLADX(x3, y2, acc3);
  262. #else
  263. /* Read four samples from smaller buffer */
  264. a = *pIn2;
  265. b = *(pIn2 + 1);
  266. #ifndef ARM_MATH_BIG_ENDIAN
  267. y1 = __PKHBT(a, b, 16);
  268. #else
  269. y1 = __PKHBT(b, a, 16);
  270. #endif
  271. a = *(pIn2 + 2);
  272. b = *(pIn2 + 3);
  273. #ifndef ARM_MATH_BIG_ENDIAN
  274. y2 = __PKHBT(a, b, 16);
  275. #else
  276. y2 = __PKHBT(b, a, 16);
  277. #endif
  278. acc0 = __SMLAD(x1, y1, acc0);
  279. acc2 = __SMLAD(x2, y1, acc2);
  280. #ifndef ARM_MATH_BIG_ENDIAN
  281. x3 = __PKHBT(x2, x1, 0);
  282. #else
  283. x3 = __PKHBT(x1, x2, 0);
  284. #endif
  285. acc1 = __SMLADX(x3, y1, acc1);
  286. a = *pScr;
  287. b = *(pScr + 1);
  288. #ifndef ARM_MATH_BIG_ENDIAN
  289. x1 = __PKHBT(a, b, 16);
  290. #else
  291. x1 = __PKHBT(b, a, 16);
  292. #endif
  293. acc0 = __SMLAD(x2, y2, acc0);
  294. acc2 = __SMLAD(x1, y2, acc2);
  295. #ifndef ARM_MATH_BIG_ENDIAN
  296. x3 = __PKHBT(x1, x2, 0);
  297. #else
  298. x3 = __PKHBT(x2, x1, 0);
  299. #endif
  300. acc3 = __SMLADX(x3, y1, acc3);
  301. acc1 = __SMLADX(x3, y2, acc1);
  302. a = *(pScr + 2);
  303. b = *(pScr + 3);
  304. #ifndef ARM_MATH_BIG_ENDIAN
  305. x2 = __PKHBT(a, b, 16);
  306. #else
  307. x2 = __PKHBT(b, a, 16);
  308. #endif
  309. #ifndef ARM_MATH_BIG_ENDIAN
  310. x3 = __PKHBT(x2, x1, 0);
  311. #else
  312. x3 = __PKHBT(x1, x2, 0);
  313. #endif
  314. acc3 = __SMLADX(x3, y2, acc3);
  315. #endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
  316. pIn2 += 4u;
  317. pScr += 4u;
  318. /* Decrement the loop counter */
  319. tapCnt--;
  320. }
  321. /* Update scratch pointer for remaining samples of smaller length sequence */
  322. pScr -= 4u;
  323. /* apply same above for remaining samples of smaller length sequence */
  324. tapCnt = (srcBLen) & 3u;
  325. while(tapCnt > 0u)
  326. {
  327. /* accumlate the results */
  328. acc0 += (*pScr++ * *pIn2);
  329. acc1 += (*pScr++ * *pIn2);
  330. acc2 += (*pScr++ * *pIn2);
  331. acc3 += (*pScr++ * *pIn2++);
  332. pScr -= 3u;
  333. /* Decrement the loop counter */
  334. tapCnt--;
  335. }
  336. blkCnt--;
  337. /* Store the results in the accumulators in the destination buffer. */
  338. *pOut = (__SSAT(acc0 >> 15u, 16));
  339. pOut += inc;
  340. *pOut = (__SSAT(acc1 >> 15u, 16));
  341. pOut += inc;
  342. *pOut = (__SSAT(acc2 >> 15u, 16));
  343. pOut += inc;
  344. *pOut = (__SSAT(acc3 >> 15u, 16));
  345. pOut += inc;
  346. /* Initialization of inputB pointer */
  347. pIn2 = py;
  348. pScratch += 4u;
  349. }
  350. blkCnt = (srcALen + srcBLen - 1u) & 0x3;
  351. /* Calculate correlation for remaining samples of Bigger length sequence */
  352. while(blkCnt > 0)
  353. {
  354. /* Initialze temporary scratch pointer as scratch1 */
  355. pScr = pScratch;
  356. /* Clear Accumlators */
  357. acc0 = 0;
  358. tapCnt = (srcBLen) >> 1u;
  359. while(tapCnt > 0u)
  360. {
  361. acc0 += (*pScr++ * *pIn2++);
  362. acc0 += (*pScr++ * *pIn2++);
  363. /* Decrement the loop counter */
  364. tapCnt--;
  365. }
  366. tapCnt = (srcBLen) & 1u;
  367. /* apply same above for remaining samples of smaller length sequence */
  368. while(tapCnt > 0u)
  369. {
  370. /* accumlate the results */
  371. acc0 += (*pScr++ * *pIn2++);
  372. /* Decrement the loop counter */
  373. tapCnt--;
  374. }
  375. blkCnt--;
  376. /* Store the result in the accumulator in the destination buffer. */
  377. *pOut = (q15_t) (__SSAT((acc0 >> 15), 16));
  378. pOut += inc;
  379. /* Initialization of inputB pointer */
  380. pIn2 = py;
  381. pScratch += 1u;
  382. }
  383. }
  384. /**
  385. * @} end of Corr group
  386. */