comparison Common/Drivers/CMSIS/Include/cmsis_gcc.h @ 128:c78bcbd5deda FlipDisplay
Added current STM32 standard libraries in a version-independent folder structure
| author | Ideenmodellierer |
|---|---|
| date | Sun, 17 Feb 2019 21:12:22 +0100 |
| parents | |
| children | bad5561c0c59 |
| parent 127:1369f8660eaa | this revision 128:c78bcbd5deda |
|---|---|
| 1 /**************************************************************************//** | |
| 2 * @file cmsis_gcc.h | |
| 3 * @brief CMSIS Cortex-M Core Function/Instruction Header File | |
| 4 * @version V4.30 | |
| 5 * @date 20. October 2015 | |
| 6 ******************************************************************************/ | |
| 7 /* Copyright (c) 2009 - 2015 ARM LIMITED | |
| 8 | |
| 9 All rights reserved. | |
| 10 Redistribution and use in source and binary forms, with or without | |
| 11 modification, are permitted provided that the following conditions are met: | |
| 12 - Redistributions of source code must retain the above copyright | |
| 13 notice, this list of conditions and the following disclaimer. | |
| 14 - Redistributions in binary form must reproduce the above copyright | |
| 15 notice, this list of conditions and the following disclaimer in the | |
| 16 documentation and/or other materials provided with the distribution. | |
| 17 - Neither the name of ARM nor the names of its contributors may be used | |
| 18 to endorse or promote products derived from this software without | |
| 19 specific prior written permission. | |
| 20 * | |
| 21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |
| 22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
| 23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
| 24 ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE | |
| 25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
| 26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
| 27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
| 28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
| 29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
| 30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
| 31 POSSIBILITY OF SUCH DAMAGE. | |
| 32 ---------------------------------------------------------------------------*/ | |
| 33 | |
| 34 | |
| 35 #ifndef __CMSIS_GCC_H | |
| 36 #define __CMSIS_GCC_H | |
| 37 | |
| 38 /* ignore some GCC warnings */ | |
| 39 #if defined ( __GNUC__ ) | |
| 40 #pragma GCC diagnostic push | |
| 41 #pragma GCC diagnostic ignored "-Wsign-conversion" | |
| 42 #pragma GCC diagnostic ignored "-Wconversion" | |
| 43 #pragma GCC diagnostic ignored "-Wunused-parameter" | |
| 44 #endif | |
| 45 | |
| 46 | |
| 47 /* ########################### Core Function Access ########################### */ | |
| 48 /** \ingroup CMSIS_Core_FunctionInterface | |
| 49 \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions | |
| 50 @{ | |
| 51 */ | |
| 52 | |
| 53 /** | |
| 54 \brief Enable IRQ Interrupts | |
| 55 \details Enables IRQ interrupts by clearing the I-bit in the CPSR. | |
| 56 Can only be executed in Privileged modes. | |
| 57 */ | |
| 58 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void) | |
| 59 { | |
| 60 __ASM volatile ("cpsie i" : : : "memory"); | |
| 61 } | |
| 62 | |
| 63 | |
| 64 /** | |
| 65 \brief Disable IRQ Interrupts | |
| 66 \details Disables IRQ interrupts by setting the I-bit in the CPSR. | |
| 67 Can only be executed in Privileged modes. | |
| 68 */ | |
| 69 __attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void) | |
| 70 { | |
| 71 __ASM volatile ("cpsid i" : : : "memory"); | |
| 72 } | |
| 73 | |
| 74 | |
| 75 /** | |
| 76 \brief Get Control Register | |
| 77 \details Returns the content of the Control Register. | |
| 78 \return Control Register value | |
| 79 */ | |
| 80 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void) | |
| 81 { | |
| 82 uint32_t result; | |
| 83 | |
| 84 __ASM volatile ("MRS %0, control" : "=r" (result) ); | |
| 85 return(result); | |
| 86 } | |
| 87 | |
| 88 | |
| 89 /** | |
| 90 \brief Set Control Register | |
| 91 \details Writes the given value to the Control Register. | |
| 92 \param [in] control Control Register value to set | |
| 93 */ | |
| 94 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control) | |
| 95 { | |
| 96 __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); | |
| 97 } | |
| 98 | |
| 99 | |
| 100 /** | |
| 101 \brief Get IPSR Register | |
| 102 \details Returns the content of the IPSR Register. | |
| 103 \return IPSR Register value | |
| 104 */ | |
| 105 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void) | |
| 106 { | |
| 107 uint32_t result; | |
| 108 | |
| 109 __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); | |
| 110 return(result); | |
| 111 } | |
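
Since IPSR holds the number of the currently active exception and reads as zero in Thread mode, it supports a cheap "running in an ISR?" test. A minimal sketch; the helper name is illustrative, not part of CMSIS:

```c
/* Returns nonzero when called from an exception handler: IPSR carries the
   active exception number and is 0 in Thread mode. */
static inline int in_interrupt_context(void)
{
    return (__get_IPSR() != 0U);
}
```
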
| 112 | |
| 113 | |
| 114 /** | |
| 115 \brief Get APSR Register | |
| 116 \details Returns the content of the APSR Register. | |
| 117 \return APSR Register value | |
| 118 */ | |
| 119 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void) | |
| 120 { | |
| 121 uint32_t result; | |
| 122 | |
| 123 __ASM volatile ("MRS %0, apsr" : "=r" (result) ); | |
| 124 return(result); | |
| 125 } | |
| 126 | |
| 127 | |
| 128 /** | |
| 129 \brief Get xPSR Register | |
| 130 \details Returns the content of the xPSR Register. | |
| 131 | |
| 132 \return xPSR Register value | |
| 133 */ | |
| 134 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void) | |
| 135 { | |
| 136 uint32_t result; | |
| 137 | |
| 138 __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); | |
| 139 return(result); | |
| 140 } | |
| 141 | |
| 142 | |
| 143 /** | |
| 144 \brief Get Process Stack Pointer | |
| 145 \details Returns the current value of the Process Stack Pointer (PSP). | |
| 146 \return PSP Register value | |
| 147 */ | |
| 148 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void) | |
| 149 { | |
| 150 register uint32_t result; | |
| 151 | |
| 152 __ASM volatile ("MRS %0, psp\n" : "=r" (result) ); | |
| 153 return(result); | |
| 154 } | |
| 155 | |
| 156 | |
| 157 /** | |
| 158 \brief Set Process Stack Pointer | |
| 159 \details Assigns the given value to the Process Stack Pointer (PSP). | |
| 160 \param [in] topOfProcStack Process Stack Pointer value to set | |
| 161 */ | |
| 162 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) | |
| 163 { | |
| 164 __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp"); | |
| 165 } | |
| 166 | |
| 167 | |
| 168 /** | |
| 169 \brief Get Main Stack Pointer | |
| 170 \details Returns the current value of the Main Stack Pointer (MSP). | |
| 171 \return MSP Register value | |
| 172 */ | |
| 173 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void) | |
| 174 { | |
| 175 register uint32_t result; | |
| 176 | |
| 177 __ASM volatile ("MRS %0, msp\n" : "=r" (result) ); | |
| 178 return(result); | |
| 179 } | |
| 180 | |
| 181 | |
| 182 /** | |
| 183 \brief Set Main Stack Pointer | |
| 184 \details Assigns the given value to the Main Stack Pointer (MSP). | |
| 185 | |
| 186 \param [in] topOfMainStack Main Stack Pointer value to set | |
| 187 */ | |
| 188 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) | |
| 189 { | |
| 190 __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp"); | |
| 191 } | |
| 192 | |
| 193 | |
| 194 /** | |
| 195 \brief Get Priority Mask | |
| 196 \details Returns the current state of the priority mask bit from the Priority Mask Register. | |
| 197 \return Priority Mask value | |
| 198 */ | |
| 199 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void) | |
| 200 { | |
| 201 uint32_t result; | |
| 202 | |
| 203 __ASM volatile ("MRS %0, primask" : "=r" (result) ); | |
| 204 return(result); | |
| 205 } | |
| 206 | |
| 207 | |
| 208 /** | |
| 209 \brief Set Priority Mask | |
| 210 \details Assigns the given value to the Priority Mask Register. | |
| 211 \param [in] priMask Priority Mask | |
| 212 */ | |
| 213 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask) | |
| 214 { | |
| 215 __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); | |
| 216 } | |
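
Together, `__get_PRIMASK`, `__disable_irq` and `__set_PRIMASK` form the usual nestable critical-section pattern. A minimal sketch, assuming this header is pulled in through the device header; the counter and function names are illustrative:

```c
/* Interrupt-safe read-modify-write of data shared with an ISR. Saving and
   restoring PRIMASK (rather than blindly re-enabling) keeps the section
   safe even when the caller already runs with interrupts disabled. */
static volatile uint32_t shared_counter;

void counter_increment(void)
{
    uint32_t primask = __get_PRIMASK();   /* remember current mask state */
    __disable_irq();                      /* cpsid i */
    shared_counter++;                     /* protected section */
    __set_PRIMASK(primask);               /* restore; re-enables IRQs only
                                             if they were enabled before */
}
```
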
| 217 | |
| 218 | |
| 219 #if (__CORTEX_M >= 0x03U) | |
| 220 | |
| 221 /** | |
| 222 \brief Enable FIQ | |
| 223 \details Enables FIQ interrupts by clearing the F-bit in the CPSR. | |
| 224 Can only be executed in Privileged modes. | |
| 225 */ | |
| 226 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void) | |
| 227 { | |
| 228 __ASM volatile ("cpsie f" : : : "memory"); | |
| 229 } | |
| 230 | |
| 231 | |
| 232 /** | |
| 233 \brief Disable FIQ | |
| 234 \details Disables FIQ interrupts by setting the F-bit in the CPSR. | |
| 235 Can only be executed in Privileged modes. | |
| 236 */ | |
| 237 __attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void) | |
| 238 { | |
| 239 __ASM volatile ("cpsid f" : : : "memory"); | |
| 240 } | |
| 241 | |
| 242 | |
| 243 /** | |
| 244 \brief Get Base Priority | |
| 245 \details Returns the current value of the Base Priority register. | |
| 246 \return Base Priority register value | |
| 247 */ | |
| 248 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void) | |
| 249 { | |
| 250 uint32_t result; | |
| 251 | |
| 252 __ASM volatile ("MRS %0, basepri" : "=r" (result) ); | |
| 253 return(result); | |
| 254 } | |
| 255 | |
| 256 | |
| 257 /** | |
| 258 \brief Set Base Priority | |
| 259 \details Assigns the given value to the Base Priority register. | |
| 260 \param [in] value Base Priority value to set | |
| 261 */ | |
| 262 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value) | |
| 263 { | |
| 264 __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory"); | |
| 265 } | |
| 266 | |
| 267 | |
| 268 /** | |
| 269 \brief Set Base Priority with condition | |
| 270 \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, | |
| 271 or the new value increases the BASEPRI priority level. | |
| 272 \param [in] value Base Priority value to set | |
| 273 */ | |
| 274 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value) | |
| 275 { | |
| 276 __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory"); | |
| 277 } | |
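
`__set_BASEPRI_MAX` only ever raises the masking threshold, which makes it the safe choice on entry to a critical section. A sketch of selective masking, assuming a device header that defines `__NVIC_PRIO_BITS`; `MASK_PRIO` and the function name are illustrative:

```c
/* Mask interrupts at or below a given urgency while leaving higher-priority
   (numerically lower) interrupts live. __NVIC_PRIO_BITS comes from the
   device header (4 on many STM32 parts). */
#define MASK_PRIO  5U   /* illustrative threshold */

void do_time_critical_work(void)
{
    uint32_t old_basepri = __get_BASEPRI();
    /* BASEPRI holds the priority in the most significant implemented bits */
    __set_BASEPRI_MAX(MASK_PRIO << (8U - __NVIC_PRIO_BITS));
    /* ... section that must not be preempted at priority >= MASK_PRIO ... */
    __set_BASEPRI(old_basepri);           /* restore the previous threshold */
}
```
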
| 278 | |
| 279 | |
| 280 /** | |
| 281 \brief Get Fault Mask | |
| 282 \details Returns the current value of the Fault Mask register. | |
| 283 \return Fault Mask register value | |
| 284 */ | |
| 285 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void) | |
| 286 { | |
| 287 uint32_t result; | |
| 288 | |
| 289 __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); | |
| 290 return(result); | |
| 291 } | |
| 292 | |
| 293 | |
| 294 /** | |
| 295 \brief Set Fault Mask | |
| 296 \details Assigns the given value to the Fault Mask register. | |
| 297 \param [in] faultMask Fault Mask value to set | |
| 298 */ | |
| 299 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) | |
| 300 { | |
| 301 __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); | |
| 302 } | |
| 303 | |
| 304 #endif /* (__CORTEX_M >= 0x03U) */ | |
| 305 | |
| 306 | |
| 307 #if (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) | |
| 308 | |
| 309 /** | |
| 310 \brief Get FPSCR | |
| 311 \details Returns the current value of the Floating Point Status/Control register. | |
| 312 \return Floating Point Status/Control register value | |
| 313 */ | |
| 314 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void) | |
| 315 { | |
| 316 #if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U) | |
| 317 uint32_t result; | |
| 318 | |
| 319 /* Empty asm statement works as a scheduling barrier */ | |
| 320 __ASM volatile (""); | |
| 321 __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); | |
| 322 __ASM volatile (""); | |
| 323 return(result); | |
| 324 #else | |
| 325 return(0); | |
| 326 #endif | |
| 327 } | |
| 328 | |
| 329 | |
| 330 /** | |
| 331 \brief Set FPSCR | |
| 332 \details Assigns the given value to the Floating Point Status/Control register. | |
| 333 \param [in] fpscr Floating Point Status/Control value to set | |
| 334 */ | |
| 335 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr) | |
| 336 { | |
| 337 #if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U) | |
| 338 /* Empty asm statement works as a scheduling barrier */ | |
| 339 __ASM volatile (""); | |
| 340 __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc"); | |
| 341 __ASM volatile (""); | |
| 342 #endif | |
| 343 } | |
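
One common use of the FPSCR pair is polling and clearing the cumulative exception flags. A sketch assuming an ARMv7-M FPU that is present and enabled, matching the guard above; the function name is illustrative:

```c
/* Read and clear the cumulative floating-point exception flags (ARMv7-M
   FPSCR layout: IOC, DZC, OFC, UFC, IXC in bits 4:0, IDC in bit 7). */
uint32_t fp_take_exception_flags(void)
{
    uint32_t fpscr = __get_FPSCR();
    __set_FPSCR(fpscr & ~0x9FU);   /* clear the flag bits, keep control bits */
    return (fpscr & 0x9FU);        /* report the flags that were set */
}
```
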
| 344 | |
| 345 #endif /* (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) */ | |
| 346 | |
| 347 | |
| 348 | |
| 349 /*@} end of CMSIS_Core_RegAccFunctions */ | |
| 350 | |
| 351 | |
| 352 /* ########################## Core Instruction Access ######################### */ | |
| 353 /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface | |
| 354 Access to dedicated instructions | |
| 355 @{ | |
| 356 */ | |
| 357 | |
| 358 /* Define macros for porting to both thumb1 and thumb2. | |
| 359 * For thumb1, use low register (r0-r7), specified by constraint "l" | |
| 360 * Otherwise, use general registers, specified by constraint "r" */ | |
| 361 #if defined (__thumb__) && !defined (__thumb2__) | |
| 362 #define __CMSIS_GCC_OUT_REG(r) "=l" (r) | |
| 363 #define __CMSIS_GCC_USE_REG(r) "l" (r) | |
| 364 #else | |
| 365 #define __CMSIS_GCC_OUT_REG(r) "=r" (r) | |
| 366 #define __CMSIS_GCC_USE_REG(r) "r" (r) | |
| 367 #endif | |
| 368 | |
| 369 /** | |
| 370 \brief No Operation | |
| 371 \details No Operation does nothing. This instruction can be used for code alignment purposes. | |
| 372 */ | |
| 373 __attribute__((always_inline)) __STATIC_INLINE void __NOP(void) | |
| 374 { | |
| 375 __ASM volatile ("nop"); | |
| 376 } | |
| 377 | |
| 378 | |
| 379 /** | |
| 380 \brief Wait For Interrupt | |
| 381 \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. | |
| 382 */ | |
| 383 __attribute__((always_inline)) __STATIC_INLINE void __WFI(void) | |
| 384 { | |
| 385 __ASM volatile ("wfi"); | |
| 386 } | |
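
A typical use of `__WFI` is the scheduler idle loop. A sketch with hypothetical application hooks; `work_pending` and `process_work` are not CMSIS names:

```c
extern int  work_pending(void);   /* hypothetical application hooks */
extern void process_work(void);

/* The standard idle loop: sleep until any enabled interrupt fires, let the
   ISR run, then re-check for work. */
void idle_loop(void)
{
    for (;;)
    {
        if (!work_pending())
        {
            __WFI();          /* enter sleep; any enabled interrupt wakes us */
        }
        else
        {
            process_work();
        }
    }
}
```
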
| 387 | |
| 388 | |
| 389 /** | |
| 390 \brief Wait For Event | |
| 391 \details Wait For Event is a hint instruction that permits the processor to enter | |
| 392 a low-power state until one of a number of events occurs. | |
| 393 */ | |
| 394 __attribute__((always_inline)) __STATIC_INLINE void __WFE(void) | |
| 395 { | |
| 396 __ASM volatile ("wfe"); | |
| 397 } | |
| 398 | |
| 399 | |
| 400 /** | |
| 401 \brief Send Event | |
| 402 \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. | |
| 403 */ | |
| 404 __attribute__((always_inline)) __STATIC_INLINE void __SEV(void) | |
| 405 { | |
| 406 __ASM volatile ("sev"); | |
| 407 } | |
| 408 | |
| 409 | |
| 410 /** | |
| 411 \brief Instruction Synchronization Barrier | |
| 412 \details Instruction Synchronization Barrier flushes the pipeline in the processor, | |
| 414 so that all instructions following the ISB are fetched from cache or memory | |
| 414 after the instruction has been completed. | |
| 415 */ | |
| 416 __attribute__((always_inline)) __STATIC_INLINE void __ISB(void) | |
| 417 { | |
| 418 __ASM volatile ("isb 0xF":::"memory"); | |
| 419 } | |
| 420 | |
| 421 | |
| 422 /** | |
| 423 \brief Data Synchronization Barrier | |
| 424 \details Acts as a special kind of Data Memory Barrier. | |
| 425 It completes when all explicit memory accesses before this instruction complete. | |
| 426 */ | |
| 427 __attribute__((always_inline)) __STATIC_INLINE void __DSB(void) | |
| 428 { | |
| 429 __ASM volatile ("dsb 0xF":::"memory"); | |
| 430 } | |
| 431 | |
| 432 | |
| 433 /** | |
| 434 \brief Data Memory Barrier | |
| 435 \details Ensures the apparent order of the explicit memory operations before | |
| 436 and after the instruction, without ensuring their completion. | |
| 437 */ | |
| 438 __attribute__((always_inline)) __STATIC_INLINE void __DMB(void) | |
| 439 { | |
| 440 __ASM volatile ("dmb 0xF":::"memory"); | |
| 441 } | |
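
A concrete place where the barrier pair matters is reprogramming the vector table base. A sketch assuming the CMSIS core peripheral definitions (`SCB`, `VTOR`) are in scope:

```c
/* Canonical barrier use: after relocating the vector table, DSB guarantees
   the store to SCB->VTOR has completed and ISB refetches all following
   instructions under the new setting. The caller must pass a table address
   aligned as the architecture requires. */
void relocate_vector_table(uint32_t new_base)
{
    SCB->VTOR = new_base;
    __DSB();   /* complete the VTOR write before continuing */
    __ISB();   /* flush the pipeline so following fetches see the change */
}
```
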
| 442 | |
| 443 | |
| 444 /** | |
| 445 \brief Reverse byte order (32 bit) | |
| 446 \details Reverses the byte order in an integer value. | |
| 447 \param [in] value Value to reverse | |
| 448 \return Reversed value | |
| 449 */ | |
| 450 __attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value) | |
| 451 { | |
| 452 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) | |
| 453 return __builtin_bswap32(value); | |
| 454 #else | |
| 455 uint32_t result; | |
| 456 | |
| 457 __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); | |
| 458 return(result); | |
| 459 #endif | |
| 460 } | |
| 461 | |
| 462 | |
| 463 /** | |
| 464 \brief Reverse byte order (16 bit) | |
| 465 \details Reverses the byte order in two unsigned short values. | |
| 466 \param [in] value Value to reverse | |
| 467 \return Reversed value | |
| 468 */ | |
| 469 __attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value) | |
| 470 { | |
| 471 uint32_t result; | |
| 472 | |
| 473 __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); | |
| 474 return(result); | |
| 475 } | |
| 476 | |
| 477 | |
| 478 /** | |
| 479 \brief Reverse byte order in signed short value | |
| 480 \details Reverses the byte order in a signed short value with sign extension to integer. | |
| 481 \param [in] value Value to reverse | |
| 482 \return Reversed value | |
| 483 */ | |
| 484 __attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value) | |
| 485 { | |
| 486 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) | |
| 487 return (int16_t)__builtin_bswap16(value); | |
| 488 #else | |
| 489 int32_t result; | |
| 490 | |
| 491 __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); | |
| 492 return(result); | |
| 493 #endif | |
| 494 } | |
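
Taken together, the REV intrinsics cover the usual wire-to-host conversions. A small demonstration; the expected results in the comments assume the inputs shown:

```c
/* Converting big-endian wire data to the Cortex-M's little-endian native
   order. */
void endian_demo(void)
{
    uint32_t host32 = __REV(0x11223344U);       /* -> 0x44332211 */
    uint32_t pairs  = __REV16(0xAABB1122U);     /* bytes swapped per halfword:
                                                   -> 0xBBAA2211 */
    int32_t  sext   = __REVSH((int32_t)0x0080); /* low halfword swapped, then
                                                   sign-extended: 0xFFFF8000 */
    (void)host32; (void)pairs; (void)sext;
}
```
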
| 495 | |
| 496 | |
| 497 /** | |
| 498 \brief Rotate Right in unsigned value (32 bit) | |
| 499 \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits. | |
| 500 \param [in] op1 Value to rotate | |
| 501 \param [in] op2 Number of Bits to rotate | |
| 502 \return Rotated value | |
| 503 */ | |
| 504 __attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2) | |
| 505 { | |
| 506 return (op1 >> (op2 & 31U)) | (op1 << ((32U - op2) & 31U)); /* masking keeps both shifts in range, so op2 == 0 or op2 >= 32 is well defined */ | |
| 507 } | |
| 508 | |
| 509 | |
| 510 /** | |
| 511 \brief Breakpoint | |
| 512 \details Causes the processor to enter Debug state. | |
| 513 Debug tools can use this to investigate system state when the instruction at a particular address is reached. | |
| 514 \param [in] value is ignored by the processor. | |
| 515 If required, a debugger can use it to store additional information about the breakpoint. | |
| 516 */ | |
| 517 #define __BKPT(value) __ASM volatile ("bkpt "#value) | |
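
A typical use of `__BKPT` is trapping assertion failures under a debugger. A sketch; the handler name is illustrative:

```c
/* Halting in the debugger from a failed assertion; the immediate operand is
   ignored by the core but visible to debug tools. With no debugger attached
   a BKPT escalates to a fault, so control normally does not return here. */
void assert_failed(const char *file, int line)
{
    (void)file;
    (void)line;
    __BKPT(0);
    for (;;) { }   /* trap in case execution resumes */
}
```
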
| 518 | |
| 519 | |
| 520 /** | |
| 521 \brief Reverse bit order of value | |
| 522 \details Reverses the bit order of the given value. | |
| 523 \param [in] value Value to reverse | |
| 524 \return Reversed value | |
| 525 */ | |
| 526 __attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value) | |
| 527 { | |
| 528 uint32_t result; | |
| 529 | |
| 530 #if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) | |
| 531 __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) ); | |
| 532 #else | |
| 533 int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */ | |
| 534 | |
| 535 result = value; /* r will be reversed bits of v; first get LSB of v */ | |
| 536 for (value >>= 1U; value; value >>= 1U) | |
| 537 { | |
| 538 result <<= 1U; | |
| 539 result |= value & 1U; | |
| 540 s--; | |
| 541 } | |
| 542 result <<= s; /* shift when v's highest bits are zero */ | |
| 543 #endif | |
| 544 return(result); | |
| 545 } | |
| 546 | |
| 547 | |
| 548 /** | |
| 549 \brief Count leading zeros | |
| 550 \details Counts the number of leading zeros of a data value. | |
| 551 \param [in] value Value to count the leading zeros | |
| 552 \return number of leading zeros in value | |
| 553 */ | |
| 554 #define __CLZ __builtin_clz | |
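
Note that `__builtin_clz` has undefined behavior for an input of 0, whereas the CLZ instruction returns 32, so portable callers should guard the zero case. Combined with `__RBIT` above, this gives the classic count-trailing-zeros idiom; the function name is illustrative:

```c
/* Count trailing zeros -- the index of the lowest set bit -- by
   bit-reversing and counting leading zeros. The zero case is handled
   explicitly because __builtin_clz(0) is undefined. */
uint32_t lowest_set_bit_index(uint32_t mask)
{
    return (mask == 0U) ? 32U : (uint32_t)__CLZ(__RBIT(mask));
}
```
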
| 555 | |
| 556 | |
| 557 #if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) | |
| 558 | |
| 559 /** | |
| 560 \brief LDR Exclusive (8 bit) | |
| 561 \details Executes an exclusive LDR instruction for an 8 bit value. | |
| 562 \param [in] addr Pointer to data | |
| 563 \return value of type uint8_t at (*addr) | |
| 564 */ | |
| 565 __attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr) | |
| 566 { | |
| 567 uint32_t result; | |
| 568 | |
| 569 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) | |
| 570 __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); | |
| 571 #else | |
| 572 /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not | |
| 573 accepted by the assembler, so the following less efficient pattern has to be used. | |
| 574 */ | |
| 575 __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); | |
| 576 #endif | |
| 577 return ((uint8_t) result); /* Add explicit type cast here */ | |
| 578 } | |
| 579 | |
| 580 | |
| 581 /** | |
| 582 \brief LDR Exclusive (16 bit) | |
| 583 \details Executes an exclusive LDR instruction for 16 bit values. | |
| 584 \param [in] addr Pointer to data | |
| 585 \return value of type uint16_t at (*addr) | |
| 586 */ | |
| 587 __attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr) | |
| 588 { | |
| 589 uint32_t result; | |
| 590 | |
| 591 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) | |
| 592 __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); | |
| 593 #else | |
| 594 /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not | |
| 595 accepted by the assembler, so the following less efficient pattern has to be used. | |
| 596 */ | |
| 597 __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); | |
| 598 #endif | |
| 599 return ((uint16_t) result); /* Add explicit type cast here */ | |
| 600 } | |
| 601 | |
| 602 | |
| 603 /** | |
| 604 \brief LDR Exclusive (32 bit) | |
| 605 \details Executes an exclusive LDR instruction for 32 bit values. | |
| 606 \param [in] addr Pointer to data | |
| 607 \return value of type uint32_t at (*addr) | |
| 608 */ | |
| 609 __attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr) | |
| 610 { | |
| 611 uint32_t result; | |
| 612 | |
| 613 __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); | |
| 614 return(result); | |
| 615 } | |
| 616 | |
| 617 | |
| 618 /** | |
| 619 \brief STR Exclusive (8 bit) | |
| 620 \details Executes an exclusive STR instruction for 8 bit values. | |
| 621 \param [in] value Value to store | |
| 622 \param [in] addr Pointer to location | |
| 623 \return 0 Function succeeded | |
| 624 \return 1 Function failed | |
| 625 */ | |
| 626 __attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) | |
| 627 { | |
| 628 uint32_t result; | |
| 629 | |
| 630 __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); | |
| 631 return(result); | |
| 632 } | |
| 633 | |
| 634 | |
| 635 /** | |
| 636 \brief STR Exclusive (16 bit) | |
| 637 \details Executes an exclusive STR instruction for 16 bit values. | |
| 638 \param [in] value Value to store | |
| 639 \param [in] addr Pointer to location | |
| 640 \return 0 Function succeeded | |
| 641 \return 1 Function failed | |
| 642 */ | |
| 643 __attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) | |
| 644 { | |
| 645 uint32_t result; | |
| 646 | |
| 647 __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); | |
| 648 return(result); | |
| 649 } | |
| 650 | |
| 651 | |
| 652 /** | |
| 653 \brief STR Exclusive (32 bit) | |
| 654 \details Executes an exclusive STR instruction for 32 bit values. | |
| 655 \param [in] value Value to store | |
| 656 \param [in] addr Pointer to location | |
| 657 \return 0 Function succeeded | |
| 658 \return 1 Function failed | |
| 659 */ | |
| 660 __attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) | |
| 661 { | |
| 662 uint32_t result; | |
| 663 | |
| 664 __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); | |
| 665 return(result); | |
| 666 } | |
| 667 | |
| 668 | |
| 669 /** | |
| 670 \brief Remove the exclusive lock | |
| 671 \details Removes the exclusive lock which is created by LDREX. | |
| 672 */ | |
| 673 __attribute__((always_inline)) __STATIC_INLINE void __CLREX(void) | |
| 674 { | |
| 675 __ASM volatile ("clrex" ::: "memory"); | |
| 676 } | |
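
The LDREX/STREX pair is the building block for lock-free read-modify-write: STREX reports 0 on success and 1 when the reservation was lost, in which case the whole sequence is retried. A minimal sketch; the function name is illustrative:

```c
/* Lock-free atomic add built on the exclusive monitor. If an interrupt or
   another bus master breaks the reservation between the load and the store,
   the store fails and the load/modify/store is retried. */
uint32_t atomic_add(volatile uint32_t *target, uint32_t delta)
{
    uint32_t val;
    do {
        val = __LDREXW(target) + delta;     /* load with reservation */
    } while (__STREXW(val, target) != 0U);  /* store; retry on failure */
    return val;
}
```
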
| 677 | |
| 678 | |
| 679 /** | |
| 680 \brief Signed Saturate | |
| 681 \details Saturates a signed value. | |
| 682 \param [in] value Value to be saturated | |
| 683 \param [in] sat Bit position to saturate to (1..32) | |
| 684 \return Saturated value | |
| 685 */ | |
| 686 #define __SSAT(ARG1,ARG2) \ | |
| 687 ({ \ | |
| 688 uint32_t __RES, __ARG1 = (ARG1); \ | |
| 689 __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ | |
| 690 __RES; \ | |
| 691 }) | |
| 692 | |
| 693 | |
| 694 /** | |
| 695 \brief Unsigned Saturate | |
| 696 \details Saturates an unsigned value. | |
| 697 \param [in] value Value to be saturated | |
| 698 \param [in] sat Bit position to saturate to (0..31) | |
| 699 \return Saturated value | |
| 700 */ | |
| 701 #define __USAT(ARG1,ARG2) \ | |
| 702 ({ \ | |
| 703 uint32_t __RES, __ARG1 = (ARG1); \ | |
| 704 __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ | |
| 705 __RES; \ | |
| 706 }) | |
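
Saturation turns the usual two-branch clamp into a single instruction; note the saturation width must be a compile-time constant because of the "I" constraint in the macro. A sketch for a Q15 accumulator, with an illustrative function name:

```c
/* Clamp a 32-bit accumulator into 16-bit signed range in one instruction
   instead of two compares. */
int16_t clamp_to_q15(int32_t acc)
{
    return (int16_t)__SSAT(acc, 16);   /* saturate to [-32768, 32767] */
}
```
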
| 707 | |
| 708 | |
| 709 /** | |
| 710 \brief Rotate Right with Extend (32 bit) | |
| 711 \details Moves each bit of a bitstring right by one bit. | |
| 712 The carry input is shifted in at the left end of the bitstring. | |
| 713 \param [in] value Value to rotate | |
| 714 \return Rotated value | |
| 715 */ | |
| 716 __attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value) | |
| 717 { | |
| 718 uint32_t result; | |
| 719 | |
| 720 __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); | |
| 721 return(result); | |
| 722 } | |
| 723 | |
| 724 | |
| 725 /** | |
| 726 \brief LDRT Unprivileged (8 bit) | |
| 727 \details Executes an unprivileged LDRT instruction for an 8 bit value. | |
| 728 \param [in] addr Pointer to data | |
| 729 \return value of type uint8_t at (*addr) | |
| 730 */ | |
| 731 __attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr) | |
| 732 { | |
| 733 uint32_t result; | |
| 734 | |
| 735 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) | |
| 736 __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) ); | |
| 737 #else | |
| 738 /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not | |
| 739 accepted by the assembler, so the following less efficient pattern has to be used. | |
| 740 */ | |
| 741 __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); | |
| 742 #endif | |
| 743 return ((uint8_t) result); /* Add explicit type cast here */ | |
| 744 } | |
| 745 | |
| 746 | |
| 747 /** | |
| 748 \brief LDRT Unprivileged (16 bit) | |
| 749 \details Executes an unprivileged LDRT instruction for 16 bit values. | |
| 750 \param [in] addr Pointer to data | |
| 751 \return value of type uint16_t at (*addr) | |
| 752 */ | |
| 753 __attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr) | |
| 754 { | |
| 755 uint32_t result; | |
| 756 | |
| 757 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) | |
| 758 __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) ); | |
| 759 #else | |
| 760 /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not | |
| 761 accepted by the assembler, so the following less efficient pattern has to be used. | |
| 762 */ | |
| 763 __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); | |
| 764 #endif | |
| 765 return ((uint16_t) result); /* Add explicit type cast here */ | |
| 766 } | |
| 767 | |
| 768 | |
| 769 /** | |
| 770 \brief LDRT Unprivileged (32 bit) | |
| 771 \details Executes an unprivileged LDRT instruction for 32 bit values. | |
| 772 \param [in] addr Pointer to data | |
| 773 \return value of type uint32_t at (*addr) | |
| 774 */ | |
| 775 __attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr) | |
| 776 { | |
| 777 uint32_t result; | |
| 778 | |
| 779 __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) ); | |
| 780 return(result); | |
| 781 } | |
| 782 | |
| 783 | |
| 784 /** | |
| 785 \brief STRT Unprivileged (8 bit) | |
| 786 \details Executes an unprivileged STRT instruction for 8 bit values. | |
| 787 \param [in] value Value to store | |
| 788 \param [in] addr Pointer to location | |
| 789 */ | |
| 790 __attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr) | |
| 791 { | |
| 792 __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) ); | |
| 793 } | |
| 794 | |
| 795 | |
| 796 /** | |
| 797 \brief STRT Unprivileged (16 bit) | |
| 798 \details Executes an unprivileged STRT instruction for 16 bit values. | |
| 799 \param [in] value Value to store | |
| 800 \param [in] addr Pointer to location | |
| 801 */ | |
| 802 __attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr) | |
| 803 { | |
| 804 __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) ); | |
| 805 } | |
| 806 | |
| 807 | |
| 808 /** | |
| 809 \brief STRT Unprivileged (32 bit) | |
| 810 \details Executes an unprivileged STRT instruction for 32 bit values. | |
| 811 \param [in] value Value to store | |
| 812 \param [in] addr Pointer to location | |
| 813 */ | |
| 814 __attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr) | |
| 815 { | |
| 816 __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) ); | |
| 817 } | |
| 818 | |
| 819 #endif /* (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) */ | |
| 820 | |
| 821 /*@}*/ /* end of group CMSIS_Core_InstructionInterface */ | |
| 822 | |
| 823 | |
| 824 /* ################### Compiler specific Intrinsics ########################### */ | |
| 825 /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics | |
| 826 Access to dedicated SIMD instructions | |
| 827 @{ | |
| 828 */ | |
| 829 | |
| 830 #if (__CORTEX_M >= 0x04U) /* only for Cortex-M4 and above */ | |
| 831 | |
| 832 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) | |
| 833 { | |
| 834 uint32_t result; | |
| 835 | |
| 836 __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 837 return(result); | |
| 838 } | |
| 839 | |
| 840 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) | |
| 841 { | |
| 842 uint32_t result; | |
| 843 | |
| 844 __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 845 return(result); | |
| 846 } | |
| 847 | |
| 848 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) | |
| 849 { | |
| 850 uint32_t result; | |
| 851 | |
| 852 __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 853 return(result); | |
| 854 } | |
| 855 | |
| 856 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) | |
| 857 { | |
| 858 uint32_t result; | |
| 859 | |
| 860 __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 861 return(result); | |
| 862 } | |
| 863 | |
| 864 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) | |
| 865 { | |
| 866 uint32_t result; | |
| 867 | |
| 868 __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 869 return(result); | |
| 870 } | |
| 871 | |
| 872 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) | |
| 873 { | |
| 874 uint32_t result; | |
| 875 | |
| 876 __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 877 return(result); | |
| 878 } | |
| 879 | |
| 880 | |
| 881 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) | |
| 882 { | |
| 883 uint32_t result; | |
| 884 | |
| 885 __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 886 return(result); | |
| 887 } | |
| 888 | |
| 889 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) | |
| 890 { | |
| 891 uint32_t result; | |
| 892 | |
| 893 __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 894 return(result); | |
| 895 } | |
| 896 | |
| 897 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) | |
| 898 { | |
| 899 uint32_t result; | |
| 900 | |
| 901 __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 902 return(result); | |
| 903 } | |
| 904 | |
| 905 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) | |
| 906 { | |
| 907 uint32_t result; | |
| 908 | |
| 909 __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 910 return(result); | |
| 911 } | |
| 912 | |
| 913 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) | |
| 914 { | |
| 915 uint32_t result; | |
| 916 | |
| 917 __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 918 return(result); | |
| 919 } | |
| 920 | |
| 921 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) | |
| 922 { | |
| 923 uint32_t result; | |
| 924 | |
| 925 __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 926 return(result); | |
| 927 } | |
| 928 | |
| 929 | |
| 930 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) | |
| 931 { | |
| 932 uint32_t result; | |
| 933 | |
| 934 __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 935 return(result); | |
| 936 } | |
| 937 | |
| 938 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) | |
| 939 { | |
| 940 uint32_t result; | |
| 941 | |
| 942 __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 943 return(result); | |
| 944 } | |
| 945 | |
| 946 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) | |
| 947 { | |
| 948 uint32_t result; | |
| 949 | |
| 950 __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 951 return(result); | |
| 952 } | |
| 953 | |
| 954 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) | |
| 955 { | |
| 956 uint32_t result; | |
| 957 | |
| 958 __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 959 return(result); | |
| 960 } | |
| 961 | |
| 962 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) | |
| 963 { | |
| 964 uint32_t result; | |
| 965 | |
| 966 __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 967 return(result); | |
| 968 } | |
| 969 | |
| 970 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) | |
| 971 { | |
| 972 uint32_t result; | |
| 973 | |
| 974 __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 975 return(result); | |
| 976 } | |
| 977 | |
| 978 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) | |
| 979 { | |
| 980 uint32_t result; | |
| 981 | |
| 982 __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 983 return(result); | |
| 984 } | |
| 985 | |
| 986 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) | |
| 987 { | |
| 988 uint32_t result; | |
| 989 | |
| 990 __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 991 return(result); | |
| 992 } | |
| 993 | |
| 994 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) | |
| 995 { | |
| 996 uint32_t result; | |
| 997 | |
| 998 __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 999 return(result); | |
| 1000 } | |
| 1001 | |
| 1002 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) | |
| 1003 { | |
| 1004 uint32_t result; | |
| 1005 | |
| 1006 __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1007 return(result); | |
| 1008 } | |
| 1009 | |
| 1010 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) | |
| 1011 { | |
| 1012 uint32_t result; | |
| 1013 | |
| 1014 __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1015 return(result); | |
| 1016 } | |
| 1017 | |
| 1018 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) | |
| 1019 { | |
| 1020 uint32_t result; | |
| 1021 | |
| 1022 __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1023 return(result); | |
| 1024 } | |
| 1025 | |
| 1026 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2) | |
| 1027 { | |
| 1028 uint32_t result; | |
| 1029 | |
| 1030 __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1031 return(result); | |
| 1032 } | |
| 1033 | |
| 1034 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2) | |
| 1035 { | |
| 1036 uint32_t result; | |
| 1037 | |
| 1038 __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1039 return(result); | |
| 1040 } | |
| 1041 | |
| 1042 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) | |
| 1043 { | |
| 1044 uint32_t result; | |
| 1045 | |
| 1046 __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1047 return(result); | |
| 1048 } | |
| 1049 | |
| 1050 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2) | |
| 1051 { | |
| 1052 uint32_t result; | |
| 1053 | |
| 1054 __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1055 return(result); | |
| 1056 } | |
| 1057 | |
| 1058 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) | |
| 1059 { | |
| 1060 uint32_t result; | |
| 1061 | |
| 1062 __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1063 return(result); | |
| 1064 } | |
| 1065 | |
| 1066 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) | |
| 1067 { | |
| 1068 uint32_t result; | |
| 1069 | |
| 1070 __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1071 return(result); | |
| 1072 } | |
| 1073 | |
| 1074 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) | |
| 1075 { | |
| 1076 uint32_t result; | |
| 1077 | |
| 1078 __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1079 return(result); | |
| 1080 } | |
| 1081 | |
| 1082 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) | |
| 1083 { | |
| 1084 uint32_t result; | |
| 1085 | |
| 1086 __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1087 return(result); | |
| 1088 } | |
| 1089 | |
| 1090 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) | |
| 1091 { | |
| 1092 uint32_t result; | |
| 1093 | |
| 1094 __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1095 return(result); | |
| 1096 } | |
| 1097 | |
| 1098 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2) | |
| 1099 { | |
| 1100 uint32_t result; | |
| 1101 | |
| 1102 __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1103 return(result); | |
| 1104 } | |
| 1105 | |
| 1106 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) | |
| 1107 { | |
| 1108 uint32_t result; | |
| 1109 | |
| 1110 __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1111 return(result); | |
| 1112 } | |
| 1113 | |
| 1114 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) | |
| 1115 { | |
| 1116 uint32_t result; | |
| 1117 | |
| 1118 __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1119 return(result); | |
| 1120 } | |
| 1121 | |
| 1122 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) | |
| 1123 { | |
| 1124 uint32_t result; | |
| 1125 | |
| 1126 __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1127 return(result); | |
| 1128 } | |
| 1129 | |
| 1130 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) | |
| 1131 { | |
| 1132 uint32_t result; | |
| 1133 | |
| 1134 __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); | |
| 1135 return(result); | |
| 1136 } | |
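
`__USADA8` accumulates per-byte absolute differences, four lanes at a time. A sketch of a sum-of-absolute-differences row kernel, assuming `len` is a multiple of 4; the names are illustrative and `memcpy` keeps the word loads well defined:

```c
#include <string.h>   /* memcpy for strict-aliasing-safe word loads */

/* Sum of absolute differences across a row of 8-bit samples, four lanes per
   step -- the inner loop of motion-estimation kernels. */
uint32_t row_sad(const uint8_t *a, const uint8_t *b, uint32_t len)
{
    uint32_t sad = 0U;
    for (uint32_t i = 0U; i < len; i += 4U)
    {
        uint32_t wa, wb;
        memcpy(&wa, &a[i], 4U);
        memcpy(&wb, &b[i], 4U);
        sad = __USADA8(wa, wb, sad);   /* sad += |a0-b0| + ... + |a3-b3| */
    }
    return sad;
}
```
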
| 1137 | |
| 1138 #define __SSAT16(ARG1,ARG2) \ | |
| 1139 ({ \ | |
| 1140 int32_t __RES, __ARG1 = (ARG1); \ | |
| 1141 __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ | |
| 1142 __RES; \ | |
| 1143 }) | |
| 1144 | |
| 1145 #define __USAT16(ARG1,ARG2) \ | |
| 1146 ({ \ | |
| 1147 uint32_t __RES, __ARG1 = (ARG1); \ | |
| 1148 __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ | |
| 1149 __RES; \ | |
| 1150 }) | |
| 1151 | |
| 1152 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1) | |
| 1153 { | |
| 1154 uint32_t result; | |
| 1155 | |
| 1156 __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); | |
| 1157 return(result); | |
| 1158 } | |
| 1159 | |
| 1160 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) | |
| 1161 { | |
| 1162 uint32_t result; | |
| 1163 | |
| 1164 __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1165 return(result); | |
| 1166 } | |
| 1167 | |
| 1168 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1) | |
| 1169 { | |
| 1170 uint32_t result; | |
| 1171 | |
| 1172 __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); | |
| 1173 return(result); | |
| 1174 } | |
| 1175 | |
| 1176 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) | |
| 1177 { | |
| 1178 uint32_t result; | |
| 1179 | |
| 1180 __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1181 return(result); | |
| 1182 } | |
| 1183 | |
| 1184 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) | |
| 1185 { | |
| 1186 uint32_t result; | |
| 1187 | |
| 1188 __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1189 return(result); | |
| 1190 } | |
| 1191 | |
| 1192 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) | |
| 1193 { | |
| 1194 uint32_t result; | |
| 1195 | |
| 1196 __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1197 return(result); | |
| 1198 } | |
| 1199 | |
| 1200 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) | |
| 1201 { | |
| 1202 uint32_t result; | |
| 1203 | |
| 1204 __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); | |
| 1205 return(result); | |
| 1206 } | |
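
`__SMLAD` multiplies both 16-bit lanes and adds the two products to an accumulator in one step, the core of Q15 dot products. A sketch assuming an even `n`; the names are illustrative:

```c
#include <string.h>

/* Q15 dot product using the dual 16x16+32 MAC: each __SMLAD consumes one
   packed sample pair from each input. The accumulation wraps on overflow
   (the Q flag is set, but there is no saturation). */
int32_t dot_q15(const int16_t *x, const int16_t *y, uint32_t n)
{
    int32_t acc = 0;
    for (uint32_t i = 0U; i < n; i += 2U)
    {
        uint32_t xw, yw;
        memcpy(&xw, &x[i], 4U);        /* pack two consecutive samples */
        memcpy(&yw, &y[i], 4U);
        acc = (int32_t)__SMLAD(xw, yw, (uint32_t)acc);
    }
    return acc;
}
```
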
| 1207 | |
| 1208 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) | |
| 1209 { | |
| 1210 uint32_t result; | |
| 1211 | |
| 1212 __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); | |
| 1213 return(result); | |
| 1214 } | |
| 1215 | |
| 1216 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) | |
| 1217 { | |
| 1218 union llreg_u{ | |
| 1219 uint32_t w32[2]; | |
| 1220 uint64_t w64; | |
| 1221 } llr; | |
| 1222 llr.w64 = acc; | |
| 1223 | |
| 1224 #ifndef __ARMEB__ /* Little endian */ | |
| 1225 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); | |
| 1226 #else /* Big endian */ | |
| 1227 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); | |
| 1228 #endif | |
| 1229 | |
| 1230 return(llr.w64); | |
| 1231 } | |
| 1232 | |
| 1233 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) | |
| 1234 { | |
| 1235 union llreg_u{ | |
| 1236 uint32_t w32[2]; | |
| 1237 uint64_t w64; | |
| 1238 } llr; | |
| 1239 llr.w64 = acc; | |
| 1240 | |
| 1241 #ifndef __ARMEB__ /* Little endian */ | |
| 1242 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); | |
| 1243 #else /* Big endian */ | |
| 1244 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); | |
| 1245 #endif | |
| 1246 | |
| 1247 return(llr.w64); | |
| 1248 } | |
| 1249 | |
| 1250 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) | |
| 1251 { | |
| 1252 uint32_t result; | |
| 1253 | |
| 1254 __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1255 return(result); | |
| 1256 } | |
| 1257 | |
| 1258 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) | |
| 1259 { | |
| 1260 uint32_t result; | |
| 1261 | |
| 1262 __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1263 return(result); | |
| 1264 } | |
| 1265 | |
| 1266 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) | |
| 1267 { | |
| 1268 uint32_t result; | |
| 1269 | |
| 1270 __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); | |
| 1271 return(result); | |
| 1272 } | |
| 1273 | |
| 1274 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) | |
| 1275 { | |
| 1276 uint32_t result; | |
| 1277 | |
| 1278 __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); | |
| 1279 return(result); | |
| 1280 } | |
| 1281 | |
| 1282 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) | |
| 1283 { | |
| 1284 union llreg_u{ | |
| 1285 uint32_t w32[2]; | |
| 1286 uint64_t w64; | |
| 1287 } llr; | |
| 1288 llr.w64 = acc; | |
| 1289 | |
| 1290 #ifndef __ARMEB__ /* Little endian */ | |
| 1291 __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); | |
| 1292 #else /* Big endian */ | |
| 1293 __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); | |
| 1294 #endif | |
| 1295 | |
| 1296 return(llr.w64); | |
| 1297 } | |
| 1298 | |
| 1299 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) | |
| 1300 { | |
| 1301 union llreg_u{ | |
| 1302 uint32_t w32[2]; | |
| 1303 uint64_t w64; | |
| 1304 } llr; | |
| 1305 llr.w64 = acc; | |
| 1306 | |
| 1307 #ifndef __ARMEB__ /* Little endian */ | |
| 1308 __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); | |
| 1309 #else /* Big endian */ | |
| 1310 __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); | |
| 1311 #endif | |
| 1312 | |
| 1313 return(llr.w64); | |
| 1314 } | |
| 1315 | |
| 1316 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2) | |
| 1317 { | |
| 1318 uint32_t result; | |
| 1319 | |
| 1320 __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1321 return(result); | |
| 1322 } | |
| 1323 | |
| 1324 __attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QADD( int32_t op1, int32_t op2) | |
| 1325 { | |
| 1326 int32_t result; | |
| 1327 | |
| 1328 __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1329 return(result); | |
| 1330 } | |
| 1331 | |
| 1332 __attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QSUB( int32_t op1, int32_t op2) | |
| 1333 { | |
| 1334 int32_t result; | |
| 1335 | |
| 1336 __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); | |
| 1337 return(result); | |
| 1338 } | |
| 1339 | |
| 1340 #define __PKHBT(ARG1,ARG2,ARG3) \ | |
| 1341 ({ \ | |
| 1342 uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ | |
| 1343 __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ | |
| 1344 __RES; \ | |
| 1345 }) | |
| 1346 | |
| 1347 #define __PKHTB(ARG1,ARG2,ARG3) \ | |
| 1348 ({ \ | |
| 1349 uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ | |
| 1350 if (ARG3 == 0) \ | |
| 1351 __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ | |
| 1352 else \ | |
| 1353 __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ | |
| 1354 __RES; \ | |
| 1355 }) | |
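
`__PKHBT` is how two halfword samples get packed for the dual-lane intrinsics above: the bottom halfword comes from the first operand and the shifted second operand supplies the top; the shift must be a compile-time constant. A small illustrative helper:

```c
/* Pack two int16 samples into one word: 'bottom' lands in bits 15:0,
   'top' (shifted left 16) in bits 31:16. */
uint32_t pack_halfwords(int16_t bottom, int16_t top)
{
    return __PKHBT((uint32_t)(uint16_t)bottom, (uint32_t)(uint16_t)top, 16);
}
```
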
| 1356 | |
| 1357 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) | |
| 1358 { | |
| 1359 int32_t result; | |
| 1360 | |
| 1361 __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); | |
| 1362 return(result); | |
| 1363 } | |
| 1364 | |
| 1365 #endif /* (__CORTEX_M >= 0x04) */ | |
| 1366 /*@} end of group CMSIS_SIMD_intrinsics */ | |
| 1367 | |
| 1368 | |
| 1369 #if defined ( __GNUC__ ) | |
| 1370 #pragma GCC diagnostic pop | |
| 1371 #endif | |
| 1372 | |
| 1373 #endif /* __CMSIS_GCC_H */ |
