security_engine.c

/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <errno.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <se_private.h>
#include <security_engine.h>
#include <tegra_platform.h>

/*******************************************************************************
 * Constants and Macros
 ******************************************************************************/
#define TIMEOUT_100MS	100U	/* 100 ms timeout, polled in 1 ms steps */
#define RNG_AES_KEY_INDEX	1

/*******************************************************************************
 * Data structure and global variables
 ******************************************************************************/
/* The security engine contexts are formatted as follows:
 *
 * SE1 CONTEXT:
 * #--------------------------------#
 * |    Random Data     1 Block     |
 * #--------------------------------#
 * |    Sticky Bits     2 Blocks    |
 * #--------------------------------#
 * |    Key Table      64 Blocks    |
 * |     For each Key (x16):        |
 * |      Key:          2 Blocks    |
 * |      Original-IV:  1 Block     |
 * |      Updated-IV:   1 Block     |
 * #--------------------------------#
 * |    RSA Keys       64 Blocks    |
 * #--------------------------------#
 * |    Known Pattern   1 Block     |
 * #--------------------------------#
 *
 * SE2/PKA1 CONTEXT:
 * #--------------------------------#
 * |    Random Data     1 Block     |
 * #--------------------------------#
 * |    Sticky Bits     2 Blocks    |
 * #--------------------------------#
 * |    Key Table      64 Blocks    |
 * |     For each Key (x16):        |
 * |      Key:          2 Blocks    |
 * |      Original-IV:  1 Block     |
 * |      Updated-IV:   1 Block     |
 * #--------------------------------#
 * |    RSA Keys       64 Blocks    |
 * #--------------------------------#
 * |    PKA sticky bits 1 Block     |
 * #--------------------------------#
 * |    PKA keys      512 Blocks    |
 * #--------------------------------#
 * |    Known Pattern   1 Block     |
 * #--------------------------------#
 */
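
/*
 * Illustrative only (a sketch, unused by this driver): the SE1 context
 * layout above expressed as a C struct, assuming 16-byte AES blocks.
 * The authoritative definitions are tegra_se_context_t and
 * tegra_se2_context_blob_t in se_private.h; the names below are
 * hypothetical.
 */
#define SE_SKETCH_BLOCK_SIZE	16U	/* one AES block */
typedef struct {
	uint8_t rand_data[1U * SE_SKETCH_BLOCK_SIZE];	/* 1 block */
	uint8_t sticky_bits[2U * SE_SKETCH_BLOCK_SIZE];	/* 2 blocks */
	struct {
		uint8_t key[2U * SE_SKETCH_BLOCK_SIZE];	/* 2 blocks */
		uint8_t oiv[1U * SE_SKETCH_BLOCK_SIZE];	/* 1 block */
		uint8_t uiv[1U * SE_SKETCH_BLOCK_SIZE];	/* 1 block */
	} key_slots[16];				/* 16 x 4 = 64 blocks */
	uint8_t rsa_keys[64U * SE_SKETCH_BLOCK_SIZE];	/* 64 blocks */
	uint8_t known_pattern[1U * SE_SKETCH_BLOCK_SIZE]; /* 1 block */
} se1_ctx_layout_sketch_t;	/* 132 blocks = 2112 bytes in total */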
/* Known pattern data for T210 */
static const uint8_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE] = {
	/* 128 bit AES block */
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
};

/* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;

/* SE2 input and output linked list buffers */
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;

/* SE1 context buffer, 132 blocks */
static __aligned(64) uint8_t se1_ctx_buf[SE_CTX_DRBG_BUFER_SIZE];

/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
	.se_num = 1,
	/* Setup base address for se */
	.se_base = TEGRA_SE1_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se1_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se1_dst_ll_buf,
	/* Setup context save destination */
	.ctx_save_buf = (uint32_t *)&se1_ctx_buf
};

/* SE2 security engine device handle (T210B01 only) */
static tegra_se_dev_t se_dev_2 = {
	.se_num = 2,
	/* Setup base address for se */
	.se_base = TEGRA_SE2_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se2_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se2_dst_ll_buf,
	/* Setup context save destination */
	.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000)
};

static bool ecid_valid;
/*******************************************************************************
 * Function Definitions
 ******************************************************************************/
static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
{
	flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
			sizeof(tegra_se_io_lst_t));
	flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
			sizeof(tegra_se_io_lst_t));
}
/*
 * Check that an SE operation has completed after kickoff.
 * This function is invoked after an SE operation has been started,
 * and it checks the following conditions:
 * 1. SE_INT_STATUS = SE_OP_DONE
 * 2. SE_STATUS = IDLE
 * 3. AHB bus data transfer is complete.
 * 4. SE_ERR_STATUS is clean.
 */
static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	/* Poll the SE interrupt register to ensure H/W operation complete */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: Atomic context save operation timeout!\n",
				__func__);
		ret = -ETIMEDOUT;
	}

	/* Poll the SE status idle to ensure H/W operation complete */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
				timeout++) {
			mdelay(1);
			val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: ERR: MEM_INTERFACE and SE idle state timeout.\n",
					__func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Check AHB bus transfer complete */
	if (ret == 0) {
		val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
				(timeout < TIMEOUT_100MS); timeout++) {
			mdelay(1);
			val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: SE write over AHB timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Ensure that no errors are thrown during operation */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
		if (val != 0U) {
			ERROR("%s: error during SE operation! 0x%x\n",
					__func__, val);
			ret = -ENOTSUP;
		}
	}

	return ret;
}
/*
 * Wait for the SE engine to be idle and clear any pending interrupts before
 * starting the next SE operation.
 */
static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t timeout;

	/* Disable SE interrupts, so the SE operation does not raise any */
	tegra_se_write_32(se_dev, SE_INT_ENABLE_REG_OFFSET, 0U);

	/* Wait for previous operation to finish */
	val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: SE status is not idle!\n", __func__);
		ret = -ETIMEDOUT;
	}

	/* Clear any pending interrupts from previous operation */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);

	return ret;
}
/*
 * SE atomic context save. At SC7 entry, the SE driver triggers the
 * hardware to perform the context save operation automatically.
 */
static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t blk_count_limit = 0;
	uint32_t block_count;

	/* Check that previous operation is finalized */
	ret = tegra_se_operation_prepare(se_dev);

	/* Read the context save progress counter: block_count.
	 * Ensure that no previous context save has been triggered, i.e.
	 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
	 */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
		if (block_count != 0U) {
			ERROR("%s: ctx_save triggered multiple times\n",
					__func__);
			ret = -EALREADY;
		}
	}

	/* Set the destination block count for when the context save completes */
	if (ret == 0) {
		blk_count_limit = block_count + se_dev->ctx_size_blks;
	}

	/* Program the SE_CONFIG register for an RNG operation:
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_MODE is ignored
	 * SE_CONFIG.DEC_MODE is ignored
	 * SE_CONFIG.DST = MEMORY
	 */
	if (ret == 0) {
		val = (SE_CONFIG_ENC_ALG_RNG |
			SE_CONFIG_DEC_ALG_NOP |
			SE_CONFIG_DST_MEMORY);
		tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

		tegra_se_make_data_coherent(se_dev);

		/* SE_CTX_SAVE operation */
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
				SE_OP_CTX_SAVE);

		ret = tegra_se_operation_complete(se_dev);
	}

	/* Check that the context save wrote the correct number of blocks */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
			ERROR("%s: expected %d blocks but %d were written\n",
					__func__, blk_count_limit,
					SE_CTX_SAVE_GET_BLK_COUNT(val));
			ret = -ECANCELED;
		}
	}

	return ret;
}
/*
 * Security engine primitive operations, including normal operation
 * and the context save operation.
 */
static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes,
					bool context_save)
{
	uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
	int ret = 0;

	assert(se_dev);

	/* Use device buffers for in and out */
	tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
	tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));

	/* Check that previous operation is finalized */
	ret = tegra_se_operation_prepare(se_dev);
	if (ret != 0) {
		goto op_error;
	}

	/* Program SE operation size */
	if (nblocks != 0U) {
		tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
	}

	/* Make SE LL data coherent before the SE operation */
	tegra_se_make_data_coherent(se_dev);

	/* Start hardware operation */
	if (context_save) {
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_CTX_SAVE);
	} else {
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
	}

	/* Wait for operation to finish */
	ret = tegra_se_operation_complete(se_dev);

op_error:
	return ret;
}
/*
 * Normal security engine operations other than the context save
 */
int tegra_se_start_normal_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	return tegra_se_perform_operation(se_dev, nbytes, false);
}

/*
 * Security engine context save operation
 */
int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	return tegra_se_perform_operation(se_dev, nbytes, true);
}
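
/*
 * Illustrative usage (a sketch, unused by this driver): programming a
 * single-block normal operation with the primitives above. The caller
 * fills in the device's source/destination linked lists, then kicks off
 * the operation; src/dst here are hypothetical caller-owned buffer
 * addresses.
 */
static int __attribute__((unused))
se_one_block_op_sketch(tegra_se_dev_t *se_dev, uint64_t src, uint64_t dst)
{
	/* One entry in each linked list */
	se_dev->src_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->src_ll_buf->buffer[0].addr = src;
	se_dev->src_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;
	se_dev->dst_ll_buf->buffer[0].addr = dst;
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;

	/* Start the operation and poll for completion */
	return tegra_se_start_normal_operation(se_dev, TEGRA_SE_AES_BLOCK_SIZE);
}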
/*
 * Security Engine sequence to generate the SRK.
 * SE and SE2 generate different SRKs from different
 * entropy seeds.
 */
static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	uint32_t val;

	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DST = SRK
	 * SE_OPERATION.OP = START
	 * SE_CRYPTO_LAST_BLOCK = 0
	 */
	se_dev->src_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->last_buff_num = 0;

	/* Configure random number generator */
	if (ecid_valid) {
		val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_ENTROPY);
	} else {
		val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
	}
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* Configure output destination = SRK */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_DST_SRK);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Perform hardware operation */
	ret = tegra_se_start_normal_operation(se_dev, 0);

	return ret;
}
/*
 * Generate plain-text random data at a given memory location using
 * the SE/SE2 SP800-90 random number generator. The random data size
 * must be a multiple of the AES block size (16 bytes).
 */
static int tegra_se_lp_generate_random_data(tegra_se_dev_t *se_dev)
{
	int ret = 0;
	uint32_t val;

	/* Set some arbitrary memory location to store the random data */
	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->rand_data)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_RANDOM_DATA_SIZE;

	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.ENC_MODE = KEY192
	 * SE_CONFIG.DST = MEMORY
	 */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_ENC_MODE_KEY192 |
		SE_CONFIG_DST_MEMORY);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Program the RNG options in SE_CRYPTO_CONFIG as follows:
	 * XOR_POS = BYPASS
	 * INPUT_SEL = RANDOM (Entropy or LFSR)
	 * HASH_ENB = DISABLE
	 */
	val = (SE_CRYPTO_INPUT_RANDOM |
		SE_CRYPTO_XOR_BYPASS |
		SE_CRYPTO_CORE_ENCRYPT |
		SE_CRYPTO_HASH_DISABLE |
		SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX) |
		SE_CRYPTO_IV_ORIGINAL);
	tegra_se_write_32(se_dev, SE_CRYPTO_REG_OFFSET, val);

	/* Configure RNG */
	if (ecid_valid) {
		val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_LFSR);
	} else {
		val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_LFSR);
	}
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* SE normal operation */
	ret = tegra_se_start_normal_operation(se_dev, SE_CTX_SAVE_RANDOM_DATA_SIZE);

	return ret;
}
/*
 * Encrypt memory blocks with the SRK as part of the security engine context.
 * The data blocks include the random data and the known pattern data, where
 * the random data is the first block and the known pattern is the last block.
 */
static int tegra_se_lp_data_context_save(tegra_se_dev_t *se_dev,
		uint64_t src_addr, uint64_t dst_addr, uint32_t data_size)
{
	int ret = 0;

	se_dev->src_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->src_ll_buf->buffer[0].addr = src_addr;
	se_dev->src_ll_buf->buffer[0].data_len = data_size;
	se_dev->dst_ll_buf->buffer[0].addr = dst_addr;
	se_dev->dst_ll_buf->buffer[0].data_len = data_size;

	/* By setting the context source to memory and issuing the context save
	 * operation, the SE encrypts the memory data with the SRK.
	 */
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, SE_CTX_SAVE_SRC_MEM);

	ret = tegra_se_start_ctx_save_operation(se_dev, data_size);

	return ret;
}
/*
 * Context save the key table access control sticky bits and the
 * security status of each key-slot. The encrypted sticky-bits are
 * 32 bytes (2 AES blocks), formatted as the following structure:
 * {	bit in registers			bit in context save
 *	SECURITY_0[4]				158
 *	SE_RSA_KEYTABLE_ACCESS_1[2:0]		157:155
 *	SE_RSA_KEYTABLE_ACCESS_0[2:0]		154:152
 *	SE_RSA_SECURITY_PERKEY_0[1:0]		151:150
 *	SE_CRYPTO_KEYTABLE_ACCESS_15[7:0]	149:142
 *	...,
 *	SE_CRYPTO_KEYTABLE_ACCESS_0[7:0]	29:22
 *	SE_CRYPTO_SECURITY_PERKEY_0[15:0]	21:6
 *	SE_TZRAM_SECURITY_0[1:0]		5:4
 *	SE_SECURITY_0[16]			3:3
 *	SE_SECURITY_0[2:0]	}		2:0
 * (An illustrative bit-extraction sketch follows the function below.)
 */
static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	uint32_t val = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->sticky_bits)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;

	/*
	 * The 1st AES block saves sticky-bits context bytes 1 - 16 (words 0 - 3).
	 * The 2nd AES block saves sticky-bits context bytes 17 - 32 (words 4 - 7).
	 */
	for (int i = 0; i < 2; i++) {
		val = SE_CTX_SAVE_SRC_STICKY_BITS |
			SE_CTX_SAVE_STICKY_WORD_QUAD(i);
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev,
				SE_CTX_SAVE_STICKY_BITS_SIZE);
		if (ret) {
			break;
		}
		se_dev->dst_ll_buf->buffer[0].addr += SE_CTX_SAVE_STICKY_BITS_SIZE;
	}

	return ret;
}
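
/*
 * Illustrative only (a sketch, unused by this driver): reading one bit
 * out of the decrypted 32-byte sticky-bits blob using the numbering in
 * the table above, e.g. bit 158 for SECURITY_0[4]. This assumes a
 * little-endian bit/byte order within the blob; the hardware format is
 * authoritative.
 */
static inline uint32_t __attribute__((unused))
se_sticky_bit_sketch(const uint8_t *blob, uint32_t bit)
{
	return ((uint32_t)blob[bit / 8U] >> (bit % 8U)) & 1U;
}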
static int tegra_se_aeskeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		ret = -EINVAL;
		goto aes_keytable_save_err;
	}

	/* AES key context save */
	for (int slot = 0; slot < TEGRA_SE_AES_KEYSLOT_COUNT; slot++) {
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						 ctx_save_buf)->key_slots[slot].key)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
		for (int i = 0; i < 2; i++) {
			val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
				SE_CTX_SAVE_KEY_INDEX(slot) |
				SE_CTX_SAVE_WORD_QUAD(i);
			tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
						"slot=%d, word_quad=%d.\n",
						__func__, slot, i);
				goto aes_keytable_save_err;
			}
			se_dev->dst_ll_buf->buffer[0].addr += TEGRA_SE_KEY_128_SIZE;
		}

		/* OIV context save */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						 ctx_save_buf)->key_slots[slot].oiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_ORIG_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}

		/* UIV context save */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						 ctx_save_buf)->key_slots[slot].uiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_UPD_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}
	}

aes_keytable_save_err:
	return ret;
}
static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;
	/* For T210, first the modulus and then the exponent must be
	 * encrypted and saved. This is repeated for SLOT 0
	 * and SLOT 1. Hence the order:
	 * SLOT 0 modulus  : RSA_KEY_INDEX : 1
	 * SLOT 0 exponent : RSA_KEY_INDEX : 0
	 * SLOT 1 modulus  : RSA_KEY_INDEX : 3
	 * SLOT 1 exponent : RSA_KEY_INDEX : 2
	 */
	const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = {
		/* RSA key slot 0 */
		{SE_RSA_KEY_INDEX_SLOT0_MOD, SE_RSA_KEY_INDEX_SLOT0_EXP},
		/* RSA key slot 1 */
		{SE_RSA_KEY_INDEX_SLOT1_MOD, SE_RSA_KEY_INDEX_SLOT1_EXP},
	};

	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se_context_t *)se_dev->
					 ctx_save_buf)->rsa_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	for (int slot = 0; slot < TEGRA_SE_RSA_KEYSLOT_COUNT; slot++) {
		/* Loop over modulus and exponent */
		for (int index = 0; index < 2; index++) {
			for (int word_quad = 0; word_quad < 16; word_quad++) {
				val = SE_CTX_SAVE_SRC_RSA_KEYTABLE |
					SE_CTX_SAVE_RSA_KEY_INDEX(
						key_index_mod[slot][index]) |
					SE_CTX_RSA_WORD_QUAD(word_quad);
				tegra_se_write_32(se_dev,
						SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

				/* SE context save operation */
				ret = tegra_se_start_ctx_save_operation(se_dev,
						TEGRA_SE_KEY_128_SIZE);
				if (ret) {
					ERROR("%s: ERR: slot=%d.\n",
							__func__, slot);
					goto rsa_keytable_save_err;
				}

				/* Update the pointer to the next word quad */
				se_dev->dst_ll_buf->buffer[0].addr +=
					TEGRA_SE_KEY_128_SIZE;
			}
		}
	}

rsa_keytable_save_err:
	return ret;
}
static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t *se_dev)
{
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se2_context_blob_t *)se_dev->
					 ctx_save_buf)->pka_ctx.sticky_bits)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;

	/* PKA1 sticky bits are 1 AES block (16 bytes) */
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
			SE_CTX_SAVE_SRC_PKA1_STICKY_BITS |
			SE_CTX_STICKY_WORD_QUAD_WORDS_0_3);

	/* SE context save operation */
	ret = tegra_se_start_ctx_save_operation(se_dev, 0);
	if (ret) {
		ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n",
				__func__);
		goto pka_sticky_bits_save_err;
	}

pka_sticky_bits_save_err:
	return ret;
}
static int tegra_se_pkakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se2_context_blob_t *)se_dev->
					 ctx_save_buf)->pka_ctx.pka_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	/* For each slot, save word quads 0-127 */
	for (int slot = 0; slot < TEGRA_SE_PKA1_KEYSLOT_COUNT; slot++) {
		for (int word_quad = 0; word_quad < 512/4; word_quad++) {
			val = SE_CTX_SAVE_SRC_PKA1_KEYTABLE |
				SE_CTX_PKA1_WORD_QUAD_L((slot * 128) +
						word_quad) |
				SE_CTX_PKA1_WORD_QUAD_H((slot * 128) +
						word_quad);
			tegra_se_write_32(se_dev,
					SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: pka1 keytable ctx save error\n",
						__func__);
				goto pka_keytable_save_err;
			}

			/* Update the pointer to the next word quad */
			se_dev->dst_ll_buf->buffer[0].addr +=
				TEGRA_SE_KEY_128_SIZE;
		}
	}

pka_keytable_save_err:
	return ret;
}
static int tegra_se_save_SRK(tegra_se_dev_t *se_dev)
{
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
			SE_CTX_SAVE_SRC_SRK);

	/* SE context save operation */
	return tegra_se_start_ctx_save_operation(se_dev, 0);
}
/*
 * Lock the SE engine from non-TZ clients.
 */
static inline void tegra_se_lock(tegra_se_dev_t *se_dev)
{
	uint32_t val;

	assert(se_dev);
	val = tegra_se_read_32(se_dev, SE_SECURITY_REG_OFFSET);
	val |= SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE);
	tegra_se_write_32(se_dev, SE_SECURITY_REG_OFFSET, val);
}
/*
 * Use the SRK to encrypt the SE state and save it to the TZRAM carveout
 */
static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev)
{
	int err = 0;

	assert(se_dev);

	/* Lock entire SE/SE2 as TZ protected */
	tegra_se_lock(se_dev);

	INFO("%s: generate SRK\n", __func__);
	/* Generate SRK */
	err = tegra_se_generate_srk(se_dev);
	if (err) {
		ERROR("%s: ERR: SRK generation failed\n", __func__);
		return err;
	}

	INFO("%s: generate random data\n", __func__);
	/* Generate random data */
	err = tegra_se_lp_generate_random_data(se_dev);
	if (err) {
		ERROR("%s: ERR: LP random pattern generation failed\n", __func__);
		return err;
	}

	INFO("%s: encrypt random data\n", __func__);
	/* Encrypt the random data block */
	err = tegra_se_lp_data_context_save(se_dev,
		((uint64_t)(&(((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rand_data))),
		((uint64_t)(&(((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rand_data))),
		SE_CTX_SAVE_RANDOM_DATA_SIZE);
	if (err) {
		ERROR("%s: ERR: random pattern encryption failed\n", __func__);
		return err;
	}

	INFO("%s: save SE sticky bits\n", __func__);
	/* Save AES sticky bits context */
	err = tegra_se_lp_sticky_bits_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: sticky bits context save failed\n", __func__);
		return err;
	}

	INFO("%s: save AES keytables\n", __func__);
	/* Save AES key table context */
	err = tegra_se_aeskeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: LP keytable save failed\n", __func__);
		return err;
	}

	/* RSA key slot table context save */
	INFO("%s: save RSA keytables\n", __func__);
	err = tegra_se_lp_rsakeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: rsa key table context save failed\n", __func__);
		return err;
	}

	/* Only SE2 has an interface with PKA1; thus, PKA1's context is saved
	 * via SE2.
	 */
	if (se_dev->se_num == 2) {
		/* Encrypt PKA1 sticky bits on SE2 only */
		INFO("%s: save PKA sticky bits\n", __func__);
		err = tegra_se_pkakeytable_sticky_bits_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__);
			return err;
		}

		/* Encrypt PKA1 keyslots on SE2 only */
		INFO("%s: save PKA keytables\n", __func__);
		err = tegra_se_pkakeytable_context_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA key table context save failed\n", __func__);
			return err;
		}
	}

	/* Encrypt known pattern */
	if (se_dev->se_num == 1) {
		err = tegra_se_lp_data_context_save(se_dev,
			((uint64_t)(&se_ctx_known_pattern_data)),
			((uint64_t)(&(((tegra_se_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
			SE_CTX_KNOWN_PATTERN_SIZE);
	} else if (se_dev->se_num == 2) {
		err = tegra_se_lp_data_context_save(se_dev,
			((uint64_t)(&se_ctx_known_pattern_data)),
			((uint64_t)(&(((tegra_se2_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
			SE_CTX_KNOWN_PATTERN_SIZE);
	}
	if (err) {
		ERROR("%s: ERR: save LP known pattern failure\n", __func__);
		return err;
	}

	/* Write the LP context buffer address into the PMC scratch register */
	if (se_dev->se_num == 1) {
		/* SE context address, supports T210 only */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SCRATCH43_REG_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	} else if (se_dev->se_num == 2) {
		/* SE2 & PKA1 context address */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH116_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	}

	/* Save the SRK to the PMC secure scratch registers for the BootROM,
	 * which verifies and restores the security engine context on warm boot.
	 */
	err = tegra_se_save_SRK(se_dev);
	if (err < 0) {
		ERROR("%s: ERR: LP SRK save failure\n", __func__);
		return err;
	}

	INFO("%s: SE context save done\n", __func__);

	return err;
}
/*
 * Initialize the SE engine handle
 */
void tegra_se_init(void)
{
	uint32_t val = 0;

	INFO("%s: start SE init\n", __func__);

	/* Generate random SRK to initialize DRBG */
	tegra_se_generate_srk(&se_dev_1);

	if (tegra_chipid_is_t210_b01()) {
		tegra_se_generate_srk(&se_dev_2);
	}

	/* Determine if ECID is valid */
	val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID);
	ecid_valid = (val == ECID_VALID);

	INFO("%s: SE init done\n", __func__);
}
static void tegra_se_enable_clocks(void)
{
	uint32_t val = 0;

	/* Enable entropy clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
	val |= ENTROPY_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);

	/* De-assert entropy reset */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W);
	val &= ~ENTROPY_RESET_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val);

	/*
	 * Switch the SE clock source to CLK_M, to make sure the SE clock
	 * is on while saving the SE context
	 */
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_RST_CTL_CLK_SRC_SE,
			SE_CLK_SRC_CLK_M);

	/* Enable SE clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
	val |= SE_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);

	/* De-assert SE reset */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V);
	val &= ~SE_RESET_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V, val);
}
static void tegra_se_disable_clocks(void)
{
	uint32_t val = 0;

	/* Disable entropy clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
	val &= ~ENTROPY_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);

	/* Disable SE clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
	val &= ~SE_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
}
/*
 * Security engine power suspend entry point.
 * This function is invoked from the PSCI power domain suspend handler.
 */
int32_t tegra_se_suspend(void)
{
	int32_t ret = 0;
	uint32_t val = 0;

	/* SE does not use the SMMU in EL3, so disable the SMMU.
	 * It will be re-enabled by the kernel on resume.
	 */
	val = mmio_read_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0);
	val &= ~PPCS_SMMU_ENABLE;
	mmio_write_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0, val);

	tegra_se_enable_clocks();

	if (tegra_chipid_is_t210_b01()) {
		/* On T210 B01, atomically context save SE2 and PKA1 */
		INFO("%s: SE2/PKA1 atomic context save\n", __func__);
		ret = tegra_se_context_save_atomic(&se_dev_2);
		if (ret != 0) {
			ERROR("%s: SE2 ctx save failed (%d)\n", __func__, ret);
		}

		ret = tegra_se_context_save_atomic(&se_dev_1);
		if (ret != 0) {
			ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
		}
	} else {
		/* On T210, context save the SE through software */
		INFO("%s: SE1 legacy (SW) context save\n", __func__);
		ret = tegra_se_context_save_sw(&se_dev_1);
		if (ret != 0) {
			ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
		}
	}

	tegra_se_disable_clocks();

	return ret;
}
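
/*
 * Illustrative call site (a sketch, not part of this driver): on the
 * SC7 entry path a platform PSCI handler is expected to call
 * tegra_se_suspend() before the SoC loses state, roughly as below.
 * The function name and surrounding logic are hypothetical.
 */
static void __attribute__((unused))
soc_sc7_entry_sketch(void)
{
	/* Save the security engine context before powering down */
	if (tegra_se_suspend() != 0) {
		ERROR("SE context save failed on SC7 entry\n");
	}
}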
/*
 * Save TZRAM to the shadow TZRAM in AON
 */
int32_t tegra_se_save_tzram(void)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	INFO("%s: SE TZRAM save start\n", __func__);
	tegra_se_enable_clocks();

	val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
	tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);

	val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
		ret = -ETIMEDOUT;
	}

	if (ret == 0) {
		INFO("%s: SE TZRAM save done!\n", __func__);
	}

	tegra_se_disable_clocks();

	return ret;
}
/*
 * This function is invoked on SE resume
 */
static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
{
	uint32_t val;

	assert(se_dev);

	/* Lock RNG source to ENTROPY on resume */
	val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
		DRBG_RO_ENT_SRC_LOCK_ENABLE |
		DRBG_RO_ENT_SRC_ENABLE;
	tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);

	/* Set a random value to the SRK to initialize the DRBG */
	tegra_se_generate_srk(se_dev);
}

/*
 * This function is invoked on SC7 resume
 */
void tegra_se_resume(void)
{
	tegra_se_warm_boot_resume(&se_dev_1);

	if (tegra_chipid_is_t210_b01()) {
		tegra_se_warm_boot_resume(&se_dev_2);
	}
}