/**************************************************************************//**
 * @file     core_cm4_simd.h
 * @brief    CMSIS Cortex-M4 SIMD Header File
 ******************************************************************************/
#ifdef __cplusplus
 extern "C" {
#endif

#ifndef __CORE_CM4_SIMD_H
#define __CORE_CM4_SIMD_H


/*******************************************************************************
 *                Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
#if   defined ( __CC_ARM ) /*------------------ RealView Compiler -----------------*/
/* ARM armcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#define __SADD8                           __sadd8
#define __QADD8                           __qadd8
#define __SHADD8                          __shadd8
#define __UADD8                           __uadd8
#define __UQADD8                          __uqadd8
#define __UHADD8                          __uhadd8
#define __SSUB8                           __ssub8
#define __QSUB8                           __qsub8
#define __SHSUB8                          __shsub8
#define __USUB8                           __usub8
#define __UQSUB8                          __uqsub8
#define __UHSUB8                          __uhsub8
#define __SADD16                          __sadd16
#define __QADD16                          __qadd16
#define __SHADD16                         __shadd16
#define __UADD16                          __uadd16
#define __UQADD16                         __uqadd16
#define __UHADD16                         __uhadd16
#define __SSUB16                          __ssub16
#define __QSUB16                          __qsub16
#define __SHSUB16                         __shsub16
#define __USUB16                          __usub16
#define __UQSUB16                         __uqsub16
#define __UHSUB16                         __uhsub16
#define __SASX                            __sasx
#define __QASX                            __qasx
#define __SHASX                           __shasx
#define __UASX                            __uasx
#define __UQASX                           __uqasx
#define __UHASX                           __uhasx
#define __SSAX                            __ssax
#define __QSAX                            __qsax
#define __SHSAX                           __shsax
#define __USAX                            __usax
#define __UQSAX                           __uqsax
#define __UHSAX                           __uhsax
#define __USAD8                           __usad8
#define __USADA8                          __usada8
#define __SSAT16                          __ssat16
#define __USAT16                          __usat16
#define __UXTB16                          __uxtb16
#define __UXTAB16                         __uxtab16
#define __SXTB16                          __sxtb16
#define __SXTAB16                         __sxtab16
#define __SMUAD                           __smuad
#define __SMUADX                          __smuadx
#define __SMLAD                           __smlad
#define __SMLADX                          __smladx
#define __SMLALD                          __smlald
#define __SMLALDX                         __smlaldx
#define __SMUSD                           __smusd
#define __SMUSDX                          __smusdx
#define __SMLSD                           __smlsd
#define __SMLSDX                          __smlsdx
#define __SMLSLD                          __smlsld
#define __SMLSLDX                         __smlsldx
#define __SEL                             __sel
#define __QADD                            __qadd
#define __QSUB                            __qsub
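
/* Illustration (not part of the original header): each mapping above resolves
 * to an armcc built-in that operates on packed sub-word lanes of a 32-bit
 * register. For example, __SADD8 adds the four signed byte lanes
 * independently, with no carry between lanes:
 *
 *     uint32_t a = 0x01FF0304;          // lanes: 0x01, 0xFF (-1), 0x03, 0x04
 *     uint32_t b = 0x01010101;
 *     uint32_t r = __SADD8(a, b);       // r == 0x02000405
 */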

#define __PKHBT(ARG1,ARG2,ARG3)  ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) | \
                                   ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)  ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) | \
                                   ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
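
/* Illustration (not part of the original header): __PKHBT keeps the bottom
 * halfword of ARG1 and packs the (left-shifted) ARG2 into the top halfword;
 * __PKHTB is the mirror image with an arithmetic right shift:
 *
 *     uint32_t r = __PKHBT(0x0000AAAA, 0x0000BBBB, 16);   // r == 0xBBBBAAAA
 */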


/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */

#include <cmsis_iar.h>

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* intrinsic __SADD8    see intrinsics.h */
/* intrinsic __QADD8    see intrinsics.h */
/* intrinsic __SHADD8   see intrinsics.h */
/* intrinsic __UADD8    see intrinsics.h */
/* intrinsic __UQADD8   see intrinsics.h */
/* intrinsic __UHADD8   see intrinsics.h */
/* intrinsic __SSUB8    see intrinsics.h */
/* intrinsic __QSUB8    see intrinsics.h */
/* intrinsic __SHSUB8   see intrinsics.h */
/* intrinsic __USUB8    see intrinsics.h */
/* intrinsic __UQSUB8   see intrinsics.h */
/* intrinsic __UHSUB8   see intrinsics.h */
/* intrinsic __SADD16   see intrinsics.h */
/* intrinsic __QADD16   see intrinsics.h */
/* intrinsic __SHADD16  see intrinsics.h */
/* intrinsic __UADD16   see intrinsics.h */
/* intrinsic __UQADD16  see intrinsics.h */
/* intrinsic __UHADD16  see intrinsics.h */
/* intrinsic __SSUB16   see intrinsics.h */
/* intrinsic __QSUB16   see intrinsics.h */
/* intrinsic __SHSUB16  see intrinsics.h */
/* intrinsic __USUB16   see intrinsics.h */
/* intrinsic __UQSUB16  see intrinsics.h */
/* intrinsic __UHSUB16  see intrinsics.h */
/* intrinsic __SASX     see intrinsics.h */
/* intrinsic __QASX     see intrinsics.h */
/* intrinsic __SHASX    see intrinsics.h */
/* intrinsic __UASX     see intrinsics.h */
/* intrinsic __UQASX    see intrinsics.h */
/* intrinsic __UHASX    see intrinsics.h */
/* intrinsic __SSAX     see intrinsics.h */
/* intrinsic __QSAX     see intrinsics.h */
/* intrinsic __SHSAX    see intrinsics.h */
/* intrinsic __USAX     see intrinsics.h */
/* intrinsic __UQSAX    see intrinsics.h */
/* intrinsic __UHSAX    see intrinsics.h */
/* intrinsic __USAD8    see intrinsics.h */
/* intrinsic __USADA8   see intrinsics.h */
/* intrinsic __SSAT16   see intrinsics.h */
/* intrinsic __USAT16   see intrinsics.h */
/* intrinsic __UXTB16   see intrinsics.h */
/* intrinsic __SXTB16   see intrinsics.h */
/* intrinsic __UXTAB16  see intrinsics.h */
/* intrinsic __SXTAB16  see intrinsics.h */
/* intrinsic __SMUAD    see intrinsics.h */
/* intrinsic __SMUADX   see intrinsics.h */
/* intrinsic __SMLAD    see intrinsics.h */
/* intrinsic __SMLADX   see intrinsics.h */
/* intrinsic __SMLALD   see intrinsics.h */
/* intrinsic __SMLALDX  see intrinsics.h */
/* intrinsic __SMUSD    see intrinsics.h */
/* intrinsic __SMUSDX   see intrinsics.h */
/* intrinsic __SMLSD    see intrinsics.h */
/* intrinsic __SMLSDX   see intrinsics.h */
/* intrinsic __SMLSLD   see intrinsics.h */
/* intrinsic __SMLSLDX  see intrinsics.h */
/* intrinsic __SEL      see intrinsics.h */
/* intrinsic __QADD     see intrinsics.h */
/* intrinsic __QSUB     see intrinsics.h */
/* intrinsic __PKHBT    see intrinsics.h */
/* intrinsic __PKHTB    see intrinsics.h */

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
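/* Note on the pattern used below (explanatory, not part of the original
 * header): each intrinsic wraps a single SIMD instruction in GCC extended
 * inline assembly. "=r" (result) binds %0 to a write-only output register,
 * the "r" (opN) constraints bind %1/%2 to input registers, and "volatile"
 * keeps the compiler from deleting or reordering the instruction, which
 * matters because several of these also update the APSR.GE flags as a side
 * effect that the constraints cannot express. */
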
__attribute__( ( always_inline ) ) static __INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) static __INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) static __INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
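
/* Illustration (not part of the original header): __USAD8 returns the sum of
 * absolute differences of the four unsigned byte lanes, and __USADA8 adds an
 * accumulator on top, the core operation of block-matching kernels:
 *
 *     uint32_t sad = __USAD8(0x05010203, 0x01050301);        // 4+4+1+2 == 11
 *     uint32_t acc = __USADA8(0x05010203, 0x01050301, 100);  // 100+11 == 111
 */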

#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
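
/* Illustration (not part of the original header): __SSAT16(x, n) clamps each
 * signed halfword of x to the range of an n-bit signed integer; n must be a
 * compile-time constant because of the "I" immediate constraint above:
 *
 *     uint32_t r = __SSAT16(0x7FFF8000, 8);   // r == 0x007FFF80 (127, -128)
 */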

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
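
/* Illustration (not part of the original header): the *XTB16 intrinsics
 * unpack bytes 0 and 2 into halfwords, e.g. when widening interleaved
 * 8-bit samples to 16 bits:
 *
 *     uint32_t r = __UXTB16(0xAABBCCDD);   // r == 0x00BB00DD (zero-extend)
 *     uint32_t s = __SXTB16(0xAABBCCDD);   // s == 0xFFBBFFDD (sign-extend)
 */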

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
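
/* Illustration (not part of the original header): __SMLAD is a dual 16x16
 * multiply-accumulate, acc + lo*lo + hi*hi, so a Q15 dot product over packed
 * sample pairs reduces to one intrinsic per pair (hypothetical sketch; x, c,
 * n and i are not defined by this header):
 *
 *     uint32_t acc = 0;
 *     for (i = 0; i < n/2; i++)
 *         acc = __SMLAD(x[i], c[i], acc);   // x[], c[]: packed int16 pairs
 */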

#define __SMLALD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLALDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SMLSLD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLSLDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) static __INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
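
/* Illustration (not part of the original header): __SEL picks each byte lane
 * from its first or second operand according to the APSR.GE flags left by a
 * preceding SIMD instruction, e.g. a per-byte maximum of a and b:
 *
 *     __USUB8(a, b);                 // sets GE[n] where byte n of a >= b
 *     uint32_t max4 = __SEL(a, b);   // lane-wise max of a and b
 */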

__attribute__( ( always_inline ) ) static __INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) static __INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

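/* Explanatory note (not part of the original header): "pkhtb ..., asr #0" is
 * not encodable (a shift field of zero encodes ASR #32), so a zero shift has
 * to fall back to the plain, unshifted form of the instruction below: */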
#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */


/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* not yet supported */
/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/


#endif

#endif /* __CORE_CM4_SIMD_H */

#ifdef __cplusplus
}
#endif