/* xtensa_context.S */
  1. /*
  2. * FreeRTOS Kernel V10.4.6
  3. * Copyright (C) 2015-2019 Cadence Design Systems, Inc.
  4. * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
  5. *
  6. * SPDX-License-Identifier: MIT
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a copy of
  9. * this software and associated documentation files (the "Software"), to deal in
  10. * the Software without restriction, including without limitation the rights to
  11. * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
  12. * the Software, and to permit persons to whom the Software is furnished to do so,
  13. * subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice shall be included in all
  16. * copies or substantial portions of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
  20. * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
  21. * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
  22. * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  23. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24. *
  25. * https://www.FreeRTOS.org
  26. * https://github.com/FreeRTOS
  27. *
  28. */
  29. /*
  30. * XTENSA CONTEXT SAVE AND RESTORE ROUTINES
  31. *
  32. * Low-level Call0 functions for handling generic context save and restore of
  33. * registers not specifically addressed by the interrupt vectors and handlers.
  34. * Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
  35. * Except for the calls to RTOS functions, this code is generic to Xtensa.
  36. *
  37. * Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-
  38. * save regs (A12-A15), which is always the case if the handlers are coded in C.
  39. * However A12, A13 are made available as scratch registers for interrupt dispatch
  40. * code, so are presumed saved anyway, and are always restored even in Call0 ABI.
  41. * Only A14, A15 are truly handled as callee-save regs.
  42. *
  43. * Because Xtensa is a configurable architecture, this port supports all user
  44. * generated configurations (except restrictions stated in the release notes).
  45. * This is accomplished by conditional compilation using macros and functions
  46. * defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
  47. * Only the processor state included in your configuration is saved and restored,
  48. * including any processor state added by user configuration options or TIE.
  49. */
  50. /* Warn nicely if this file gets named with a lowercase .s instead of .S: */
  51. #define NOERROR #
  52. NOERROR: .error "C preprocessor needed for this file: make sure its filename\
  53. ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option."
  54. #include "xtensa_rtos.h"
  55. #ifdef XT_USE_OVLY
  56. #include <xtensa/overlay_os_asm.h>
  57. #endif
  58. .text
  59. /*******************************************************************************
  60. _xt_context_save
  61. !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
  62. Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
  63. interrupt stack frame defined in xtensa_rtos.h.
  64. Its counterpart is _xt_context_restore (which also restores A12, A13).
  65. Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
  66. This function preserves A12 & A13 in order to provide the caller with 2 scratch
  67. regs that need not be saved over the call to this function. The choice of which
  68. 2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
  69. to avoid moving data more than necessary. Caller can assign regs accordingly.
  70. Entry Conditions:
  71. A0 = Return address in caller.
  72. A1 = Stack pointer of interrupted thread or handler ("interruptee").
  73. Original A12, A13 have already been saved in the interrupt stack frame.
  74. Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
  75. point of interruption.
  76. If windowed ABI, PS.EXCM = 1 (exceptions disabled).
  77. Exit conditions:
  78. A0 = Return address in caller.
  79. A1 = Stack pointer of interrupted thread or handler ("interruptee").
  80. A12, A13 as at entry (preserved).
  81. If windowed ABI, PS.EXCM = 1 (exceptions disabled).
  82. *******************************************************************************/
    .global _xt_context_save
    .type   _xt_context_save,@function
    .align  4
_xt_context_save:

    /* Save the caller-saved address registers a2-a11 into the interrupt
       stack frame. PC, PS, A0, A1 (SP), A12, A13 were already saved by the
       caller (see entry conditions in the header comment above). */
    s32i    a2,  sp, XT_STK_A2
    s32i    a3,  sp, XT_STK_A3
    s32i    a4,  sp, XT_STK_A4
    s32i    a5,  sp, XT_STK_A5
    s32i    a6,  sp, XT_STK_A6
    s32i    a7,  sp, XT_STK_A7
    s32i    a8,  sp, XT_STK_A8
    s32i    a9,  sp, XT_STK_A9
    s32i    a10, sp, XT_STK_A10
    s32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be saved here.
    a12-13 are the caller's responsibility so it can use them as scratch.
    So only need to save a14-a15 here for Windowed ABI (not Call0).
    */
#ifndef __XTENSA_CALL0_ABI__
    s32i    a14, sp, XT_STK_A14
    s32i    a15, sp, XT_STK_A15
#endif

    /* SAR (shift-amount register) is clobbered by funnel-shift sequences,
       so it is part of the saved context. */
    rsr     a3,  SAR
    s32i    a3,  sp, XT_STK_SAR

#if XCHAL_HAVE_LOOPS
    /* Zero-overhead loop registers, present only when the core is
       configured with the loops option. */
    rsr     a3,  LBEG
    s32i    a3,  sp, XT_STK_LBEG
    rsr     a3,  LEND
    s32i    a3,  sp, XT_STK_LEND
    rsr     a3,  LCOUNT
    s32i    a3,  sp, XT_STK_LCOUNT
#endif

#if XT_USE_SWPRI
    /* Save virtual priority mask (software interrupt prioritization). */
    movi    a3,  _xt_vpri_mask
    l32i    a3,  a3, 0
    s32i    a3,  sp, XT_STK_VPRI
#endif

#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    /* The HAL calls below clobber a0; park the return address in a9, which
       both xthal_window_spill_nw and xthal_save_extra_nw preserve. */
    mov     a9,  a0                         /* preserve ret addr */
#endif

#ifndef __XTENSA_CALL0_ABI__
    /*
    To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
    Need to save a9,12,13 temporarily (in frame temps) and recover originals.
    Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
    and underflow exceptions disabled (assured by PS.EXCM == 1).
    */
    s32i    a12, sp, XT_STK_TMP0            /* temp. save stuff in stack frame */
    s32i    a13, sp, XT_STK_TMP1
    s32i    a9,  sp, XT_STK_TMP2

    /*
    Save the overlay state if we are supporting overlays. Since we just saved
    three registers, we can conveniently use them here. Note that as of now,
    overlays only work for windowed calling ABI.
    */
#ifdef XT_USE_OVLY
    l32i    a9,  sp, XT_STK_PC              /* recover saved PC */
    _xt_overlay_get_state a9, a12, a13
    s32i    a9,  sp, XT_STK_OVLY            /* save overlay state */
#endif

    /* Put the interruptee's original values back in a9/a12/a13 and restore
       its SP, so the window spill sees the pre-interrupt register state. */
    l32i    a12, sp, XT_STK_A12             /* recover original a9,12,13 */
    l32i    a13, sp, XT_STK_A13
    l32i    a9,  sp, XT_STK_A9
    addi    sp,  sp, XT_STK_FRMSZ           /* restore the interruptee's SP */
    call0   xthal_window_spill_nw           /* preserves only a4,5,8,9,12,13 */
    addi    sp,  sp, -XT_STK_FRMSZ          /* back to our frame */
    l32i    a12, sp, XT_STK_TMP0            /* recover stuff from stack frame */
    l32i    a13, sp, XT_STK_TMP1
    l32i    a9,  sp, XT_STK_TMP2
#endif

#if XCHAL_EXTRA_SA_SIZE > 0
    /*
    NOTE: Normally the xthal_save_extra_nw macro only affects address
    registers a2-a5. It is theoretically possible for Xtensa processor
    designers to write TIE that causes more address registers to be
    affected, but it is generally unlikely. If that ever happens,
    more registers need to be saved/restored around this macro invocation.
    Here we assume a9,12,13 are preserved.
    Future Xtensa tools releases might limit the regs that can be affected.
    */
    addi    a2,  sp, XT_STK_EXTRA           /* where to save it */
# if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3, -XCHAL_EXTRA_SA_ALIGN
    and     a2, a2, a3                      /* align dynamically >16 bytes */
# endif
    call0   xthal_save_extra_nw             /* destroys a0,2,3,4,5 */
#endif

#if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a0, a9                          /* retrieve ret addr */
#endif

    ret
  176. /*******************************************************************************
  177. _xt_context_restore
  178. !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!
  179. Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
  180. ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
  181. stack frame defined in xtensa_rtos.h .
  182. Its counterpart is _xt_context_save (whose caller saved A12, A13).
  183. Caller is responsible to restore PC, PS, A0, A1 (SP).
  184. Entry Conditions:
  185. A0 = Return address in caller.
  186. A1 = Stack pointer of interrupted thread or handler ("interruptee").
  187. Exit conditions:
  188. A0 = Return address in caller.
  189. A1 = Stack pointer of interrupted thread or handler ("interruptee").
  190. Other processor state except PC, PS, A0, A1 (SP), is as at the point
  191. of interruption.
  192. *******************************************************************************/
  193. .global _xt_context_restore
  194. .type _xt_context_restore,@function
  195. .align 4
  196. _xt_context_restore:
  197. #if XCHAL_EXTRA_SA_SIZE > 0
  198. /*
  199. NOTE: Normally the xthal_restore_extra_nw macro only affects address
  200. registers a2-a5. It is theoretically possible for Xtensa processor
  201. designers to write TIE that causes more address registers to be
  202. affected, but it is generally unlikely. If that ever happens,
  203. more registers need to be saved/restored around this macro invocation.
  204. Here we only assume a13 is preserved.
  205. Future Xtensa tools releases might limit the regs that can be affected.
  206. */
  207. mov a13, a0 /* preserve ret addr */
  208. addi a2, sp, XT_STK_EXTRA /* where to find it */
  209. # if XCHAL_EXTRA_SA_ALIGN > 16
  210. movi a3, -XCHAL_EXTRA_SA_ALIGN
  211. and a2, a2, a3 /* align dynamically >16 bytes */
  212. # endif
  213. call0 xthal_restore_extra_nw /* destroys a0,2,3,4,5 */
  214. mov a0, a13 /* retrieve ret addr */
  215. #endif
  216. #if XCHAL_HAVE_LOOPS
  217. l32i a2, sp, XT_STK_LBEG
  218. l32i a3, sp, XT_STK_LEND
  219. wsr a2, LBEG
  220. l32i a2, sp, XT_STK_LCOUNT
  221. wsr a3, LEND
  222. wsr a2, LCOUNT
  223. #endif
  224. #ifdef XT_USE_OVLY
  225. /*
  226. If we are using overlays, this is a good spot to check if we need
  227. to restore an overlay for the incoming task. Here we have a bunch
  228. of registers to spare. Note that this step is going to use a few
  229. bytes of storage below SP (SP-20 to SP-32) if an overlay is going
  230. to be restored.
  231. */
  232. l32i a2, sp, XT_STK_PC /* retrieve PC */
  233. l32i a3, sp, XT_STK_PS /* retrieve PS */
  234. l32i a4, sp, XT_STK_OVLY /* retrieve overlay state */
  235. l32i a5, sp, XT_STK_A1 /* retrieve stack ptr */
  236. _xt_overlay_check_map a2, a3, a4, a5, a6
  237. s32i a2, sp, XT_STK_PC /* save updated PC */
  238. s32i a3, sp, XT_STK_PS /* save updated PS */
  239. #endif
  240. #ifdef XT_USE_SWPRI
  241. /* Restore virtual interrupt priority and interrupt enable */
  242. movi a3, _xt_intdata
  243. l32i a4, a3, 0 /* a4 = _xt_intenable */
  244. l32i a5, sp, XT_STK_VPRI /* a5 = saved _xt_vpri_mask */
  245. and a4, a4, a5
  246. wsr a4, INTENABLE /* update INTENABLE */
  247. s32i a5, a3, 4 /* restore _xt_vpri_mask */
  248. #endif
  249. l32i a3, sp, XT_STK_SAR
  250. l32i a2, sp, XT_STK_A2
  251. wsr a3, SAR
  252. l32i a3, sp, XT_STK_A3
  253. l32i a4, sp, XT_STK_A4
  254. l32i a5, sp, XT_STK_A5
  255. l32i a6, sp, XT_STK_A6
  256. l32i a7, sp, XT_STK_A7
  257. l32i a8, sp, XT_STK_A8
  258. l32i a9, sp, XT_STK_A9
  259. l32i a10, sp, XT_STK_A10
  260. l32i a11, sp, XT_STK_A11
  261. /*
  262. Call0 ABI callee-saved regs a12-15 do not need to be restored here.
  263. However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
  264. so need to be restored anyway, despite being callee-saved in Call0.
  265. */
  266. l32i a12, sp, XT_STK_A12
  267. l32i a13, sp, XT_STK_A13
  268. #ifndef __XTENSA_CALL0_ABI__
  269. l32i a14, sp, XT_STK_A14
  270. l32i a15, sp, XT_STK_A15
  271. #endif
  272. ret
  273. /*******************************************************************************
  274. _xt_coproc_init
  275. Initializes global co-processor management data, setting all co-processors
  276. to "unowned". Leaves CPENABLE as it found it (does NOT clear it).
  277. Called during initialization of the RTOS, before any threads run.
  278. This may be called from normal Xtensa single-threaded application code which
  279. might use co-processors. The Xtensa run-time initialization enables all
  280. co-processors. They must remain enabled here, else a co-processor exception
  281. might occur outside of a thread, which the exception handler doesn't expect.
  282. Entry Conditions:
  283. Xtensa single-threaded run-time environment is in effect.
  284. No thread is yet running.
  285. Exit conditions:
  286. None.
  287. Obeys ABI conventions per prototype:
  288. void _xt_coproc_init(void)
  289. *******************************************************************************/
  290. #if XCHAL_CP_NUM > 0
  291. .global _xt_coproc_init
  292. .type _xt_coproc_init,@function
  293. .align 4
  294. _xt_coproc_init:
  295. ENTRY0
  296. /* Initialize thread co-processor ownerships to 0 (unowned). */
  297. movi a2, _xt_coproc_owner_sa /* a2 = base of owner array */
  298. addi a3, a2, XCHAL_CP_MAX << 2 /* a3 = top+1 of owner array */
  299. movi a4, 0 /* a4 = 0 (unowned) */
  300. 1: s32i a4, a2, 0
  301. addi a2, a2, 4
  302. bltu a2, a3, 1b
  303. RET0
  304. #endif
  305. /*******************************************************************************
  306. _xt_coproc_release
  307. Releases any and all co-processors owned by a given thread. The thread is
  308. identified by its co-processor state save area defined in xtensa_context.h.
  309. Must be called before a thread's co-proc save area is deleted to avoid
  310. memory corruption when the exception handler tries to save the state.
  311. May be called when a thread terminates or completes but does not delete
  312. the co-proc save area, to avoid the exception handler having to save the
  313. thread's co-proc state before another thread can use it (optimization).
  314. Entry Conditions:
  315. A2 = Pointer to base of co-processor state save area.
  316. Exit conditions:
  317. None.
  318. Obeys ABI conventions per prototype:
  319. void _xt_coproc_release(void * coproc_sa_base)
  320. *******************************************************************************/
#if XCHAL_CP_NUM > 0

    .global _xt_coproc_release
    .type   _xt_coproc_release,@function
    .align  4
_xt_coproc_release:
    ENTRY0                                  /* a2 = base of save area */

    movi    a3,  _xt_coproc_owner_sa        /* a3 = base of owner array */
    addi    a4,  a3, XCHAL_CP_MAX << 2      /* a4 = top+1 of owner array */
    movi    a5,  0                          /* a5 = 0 (unowned) */

    /* Lock out interrupts while scanning so an exception handler cannot
       observe or update a half-released owner entry. */
    rsil    a6,  XCHAL_EXCM_LEVEL           /* lock interrupts; a6 = old PS */

    /* For each co-processor, release it if this thread owns it. */
1:  l32i    a7,  a3, 0                      /* a7 = owner at a3 */
    bne     a2,  a7, 2f                     /* if (coproc_sa_base == owner) */
    s32i    a5,  a3, 0                      /*     owner = unowned */
2:  addi    a3,  a3, 1<<2                   /* a3 = next entry in owner array */
    bltu    a3,  a4, 1b                     /* repeat until end of array */

    /* NOTE: label 3 is unreferenced; kept for byte-identity with upstream. */
3:  wsr     a6,  PS                         /* restore interrupts */

    RET0
#endif
  339. /*******************************************************************************
  340. _xt_coproc_savecs
  341. If there is a current thread and it has a coprocessor state save area, then
  342. save all callee-saved state into this area. This function is called from the
  343. solicited context switch handler. It calls a system-specific function to get
  344. the coprocessor save area base address.
  345. Entry conditions:
  346. - The thread being switched out is still the current thread.
  347. - CPENABLE state reflects which coprocessors are active.
  348. - Registers have been saved/spilled already.
  349. Exit conditions:
  350. - All necessary CP callee-saved state has been saved.
  351. - Registers a2-a7, a13-a15 have been trashed.
  352. Must be called from assembly code only, using CALL0.
  353. *******************************************************************************/
#if XCHAL_CP_NUM > 0

    .extern _xt_coproc_sa_offset            /* external reference */

    .global _xt_coproc_savecs
    .type   _xt_coproc_savecs,@function
    .align  4
_xt_coproc_savecs:

    /* At entry, CPENABLE should be showing which CPs are enabled. */
    rsr     a2,  CPENABLE                   /* a2 = which CPs are enabled */
    beqz    a2,  .Ldone                     /* quick exit if none */

    /* NOTE(review): XT_RTOS_CP_STATE is expected to return the current
       thread's CP state save-area pointer in a15 (per the beqz a15 test
       below) while preserving a2 -- confirm against the RTOS port macro. */
    mov     a14, a0                         /* save return address */
    call0   XT_RTOS_CP_STATE                /* get address of CP save area */
    mov     a0,  a14                        /* restore return address */
    beqz    a15, .Ldone                     /* if none then nothing to do */

    s16i    a2,  a15, XT_CP_CS_ST           /* save mask of CPs being stored */
    movi    a13, _xt_coproc_sa_offset       /* array of CP save offsets */
    l32i    a15, a15, XT_CP_ASA             /* a15 = base of aligned save area */

    /* For each configured co-processor: skip if its CPENABLE bit is clear,
       else store its callee-saved state at base + _xt_coproc_sa_offset[n]
       via the HAL-generated xchal_cpN_store macro (uses a3-a7 as temps). */
#if XCHAL_CP0_SA_SIZE
    bbci.l  a2,  0, 2f                      /* CP 0 not enabled */
    l32i    a14, a13, 0                     /* a14 = _xt_coproc_sa_offset[0] */
    add     a3,  a14, a15                   /* a3 = save area for CP 0 */
    xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a2,  1, 2f                      /* CP 1 not enabled */
    l32i    a14, a13, 4                     /* a14 = _xt_coproc_sa_offset[1] */
    add     a3,  a14, a15                   /* a3 = save area for CP 1 */
    xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a2,  2, 2f                      /* CP 2 not enabled */
    l32i    a14, a13, 8
    add     a3,  a14, a15
    xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a2,  3, 2f                      /* CP 3 not enabled */
    l32i    a14, a13, 12
    add     a3,  a14, a15
    xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a2,  4, 2f                      /* CP 4 not enabled */
    l32i    a14, a13, 16
    add     a3,  a14, a15
    xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a2,  5, 2f                      /* CP 5 not enabled */
    l32i    a14, a13, 20
    add     a3,  a14, a15
    xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a2,  6, 2f                      /* CP 6 not enabled */
    l32i    a14, a13, 24
    add     a3,  a14, a15
    xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a2,  7, 2f                      /* CP 7 not enabled */
    l32i    a14, a13, 28
    add     a3,  a14, a15
    xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone:
    ret
#endif
  429. /*******************************************************************************
  430. _xt_coproc_restorecs
  431. Restore any callee-saved coprocessor state for the incoming thread.
  432. This function is called from coprocessor exception handling, when giving
  433. ownership to a thread that solicited a context switch earlier. It calls a
  434. system-specific function to get the coprocessor save area base address.
  435. Entry conditions:
  436. - The incoming thread is set as the current thread.
  437. - CPENABLE is set up correctly for all required coprocessors.
  438. - a2 = mask of coprocessors to be restored.
  439. Exit conditions:
  440. - All necessary CP callee-saved state has been restored.
  441. - CPENABLE - unchanged.
  442. - Registers a2-a7, a13-a15 have been trashed.
  443. Must be called from assembly code only, using CALL0.
  444. *******************************************************************************/
#if XCHAL_CP_NUM > 0

    .global _xt_coproc_restorecs
    .type   _xt_coproc_restorecs,@function
    .align  4
_xt_coproc_restorecs:

    /* NOTE(review): XT_RTOS_CP_STATE is expected to return the current
       thread's CP state save-area pointer in a15 (per the beqz a15 test
       below) while preserving a2 -- confirm against the RTOS port macro. */
    mov     a14, a0                         /* save return address */
    call0   XT_RTOS_CP_STATE                /* get address of CP save area */
    mov     a0,  a14                        /* restore return address */
    beqz    a15, .Ldone2                    /* if none then nothing to do */

    /* Clear the restored CPs out of the saved-CP mask. The xor clears bits
       only if a2 is a subset of the saved mask -- assumed guaranteed by the
       caller; verify at call sites.
       NOTE(review): the mask is loaded with l16ui but written back with
       s32i (_xt_coproc_savecs writes it with s16i). The 32-bit store also
       writes the 2 bytes after XT_CP_CS_ST -- presumably padding per the
       frame layout in xtensa_context.h; TODO confirm. */
    l16ui   a3,  a15, XT_CP_CS_ST           /* a3 = which CPs have been saved */
    xor     a3,  a3, a2                     /* clear the ones being restored */
    s32i    a3,  a15, XT_CP_CS_ST           /* update saved CP mask */
    movi    a13, _xt_coproc_sa_offset       /* array of CP save offsets */
    l32i    a15, a15, XT_CP_ASA             /* a15 = base of aligned save area */

    /* For each configured co-processor: skip if its bit in a2 is clear,
       else reload its callee-saved state from base + _xt_coproc_sa_offset[n]
       via the HAL-generated xchal_cpN_load macro (uses a3-a7 as temps). */
#if XCHAL_CP0_SA_SIZE
    bbci.l  a2,  0, 2f                      /* CP 0 not enabled */
    l32i    a14, a13, 0                     /* a14 = _xt_coproc_sa_offset[0] */
    add     a3,  a14, a15                   /* a3 = save area for CP 0 */
    xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a2,  1, 2f                      /* CP 1 not enabled */
    l32i    a14, a13, 4                     /* a14 = _xt_coproc_sa_offset[1] */
    add     a3,  a14, a15                   /* a3 = save area for CP 1 */
    xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a2,  2, 2f                      /* CP 2 not enabled */
    l32i    a14, a13, 8
    add     a3,  a14, a15
    xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a2,  3, 2f                      /* CP 3 not enabled */
    l32i    a14, a13, 12
    add     a3,  a14, a15
    xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a2,  4, 2f                      /* CP 4 not enabled */
    l32i    a14, a13, 16
    add     a3,  a14, a15
    xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a2,  5, 2f                      /* CP 5 not enabled */
    l32i    a14, a13, 20
    add     a3,  a14, a15
    xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a2,  6, 2f                      /* CP 6 not enabled */
    l32i    a14, a13, 24
    add     a3,  a14, a15
    xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a2,  7, 2f                      /* CP 7 not enabled */
    l32i    a14, a13, 28
    add     a3,  a14, a15
    xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone2:
    ret
#endif