271-crypto-add-ss.patch 34 KB

  1. --- a/drivers/crypto/Kconfig
  2. +++ b/drivers/crypto/Kconfig
  3. @@ -419,4 +419,21 @@ config CRYPTO_DEV_MXS_DCP
  4. To compile this driver as a module, choose M here: the module
  5. will be called mxs-dcp.
  6. +config CRYPTO_DEV_SUNXI_SS
  7. + tristate "Support for Allwinner Security System cryptographic accelerator"
  8. + depends on ARCH_SUNXI
  9. + select CRYPTO_MD5
  10. + select CRYPTO_SHA1
  11. + select CRYPTO_AES
  12. + select CRYPTO_DES
  13. + select CRYPTO_BLKCIPHER
  14. + help
  15. + Some Allwinner SoCs have a crypto accelerator named
  16. + Security System. Select this if you want to use it.
  17. + The Security System handles AES/DES/3DES ciphers in CBC mode
  18. + and the SHA1 and MD5 hash algorithms.
  19. +
  20. + To compile this driver as a module, choose M here: the module
  21. + will be called sunxi-ss.
  22. +
  23. endif # CRYPTO_HW
  24. --- a/drivers/crypto/Makefile
  25. +++ b/drivers/crypto/Makefile
  26. @@ -23,3 +23,4 @@ obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahar
  27. obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
  28. obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
  29. obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
  30. +obj-$(CONFIG_CRYPTO_DEV_SUNXI_SS) += sunxi-ss/
  31. --- /dev/null
  32. +++ b/drivers/crypto/sunxi-ss/Makefile
  33. @@ -0,0 +1,2 @@
  34. +obj-$(CONFIG_CRYPTO_DEV_SUNXI_SS) += sunxi-ss.o
  35. +sunxi-ss-y += sunxi-ss-core.o sunxi-ss-hash.o sunxi-ss-cipher.o
  36. --- /dev/null
  37. +++ b/drivers/crypto/sunxi-ss/sunxi-ss-cipher.c
  38. @@ -0,0 +1,461 @@
  39. +/*
  40. + * sunxi-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
  41. + *
  42. + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
  43. + *
  44. + * This file adds support for the AES cipher with 128, 192 and 256-bit
  45. + * keysizes in CBC mode.
  46. + *
  47. + * You can find the datasheet at
  48. + * http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
  49. + *
  50. + * This program is free software; you can redistribute it and/or modify
  51. + * it under the terms of the GNU General Public License as published by
  52. + * the Free Software Foundation; either version 2 of the License, or
  53. + * (at your option) any later version.
  54. + */
  55. +#include "sunxi-ss.h"
  56. +
  57. +extern struct sunxi_ss_ctx *ss;
  58. +
  59. +static int sunxi_ss_cipher(struct ablkcipher_request *areq, u32 mode)
  60. +{
  61. + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
  62. + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
  63. + const char *cipher_type;
  64. +
  65. + cipher_type = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
  66. +
  67. + if (areq->nbytes == 0) {
  68. + mutex_unlock(&ss->lock);
  69. + return 0;
  70. + }
  71. +
  72. + if (areq->info == NULL) {
  73. + dev_err(ss->dev, "ERROR: Empty IV\n");
  74. + mutex_unlock(&ss->lock);
  75. + return -EINVAL;
  76. + }
  77. +
  78. + if (areq->src == NULL || areq->dst == NULL) {
  79. + dev_err(ss->dev, "ERROR: Some SGs are NULL\n");
  80. + mutex_unlock(&ss->lock);
  81. + return -EINVAL;
  82. + }
  83. +
  84. + if (strcmp("cbc(aes)", cipher_type) == 0) {
  85. + op->mode |= SS_OP_AES | SS_CBC | SS_ENABLED | mode;
  86. + return sunxi_ss_aes_poll(areq);
  87. + }
  88. + if (strcmp("cbc(des)", cipher_type) == 0) {
  89. + op->mode = SS_OP_DES | SS_CBC | SS_ENABLED | mode;
  90. + return sunxi_ss_des_poll(areq);
  91. + }
  92. + if (strcmp("cbc(des3_ede)", cipher_type) == 0) {
  93. + op->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | mode;
  94. + return sunxi_ss_des_poll(areq);
  95. + }
  96. + dev_err(ss->dev, "ERROR: Cipher %s not handled\n", cipher_type);
  97. + mutex_unlock(&ss->lock);
  98. + return -EINVAL;
  99. +}
  100. +
  101. +int sunxi_ss_cipher_encrypt(struct ablkcipher_request *areq)
  102. +{
  103. + return sunxi_ss_cipher(areq, SS_ENCRYPTION);
  104. +}
  105. +
  106. +int sunxi_ss_cipher_decrypt(struct ablkcipher_request *areq)
  107. +{
  108. + return sunxi_ss_cipher(areq, SS_DECRYPTION);
  109. +}
  110. +
  111. +int sunxi_ss_cipher_init(struct crypto_tfm *tfm)
  112. +{
  113. + struct sunxi_req_ctx *op = crypto_tfm_ctx(tfm);
  114. +
  115. + mutex_lock(&ss->lock);
  116. +
  117. + memset(op, 0, sizeof(struct sunxi_req_ctx));
  118. + return 0;
  119. +}
  120. +
  121. +int sunxi_ss_aes_poll(struct ablkcipher_request *areq)
  122. +{
  123. + u32 spaces;
  124. + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
  125. + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
  126. + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
  127. + /* when activating SS, the default FIFO space is 32 */
  128. + u32 rx_cnt = 32;
  129. + u32 tx_cnt = 0;
  130. + u32 v;
  131. + int i;
  132. + struct scatterlist *in_sg;
  133. + struct scatterlist *out_sg;
  134. + void *src_addr;
  135. + void *dst_addr;
  136. + unsigned int ileft = areq->nbytes;
  137. + unsigned int oleft = areq->nbytes;
  138. + unsigned int sgileft = areq->src->length;
  139. + unsigned int sgoleft = areq->dst->length;
  140. + unsigned int todo;
  141. + u32 *src32;
  142. + u32 *dst32;
  143. +
  144. + in_sg = areq->src;
  145. + out_sg = areq->dst;
  146. + for (i = 0; i < op->keylen; i += 4)
  147. + writel(*(op->key + i/4), ss->base + SS_KEY0 + i);
  148. + if (areq->info != NULL) {
  149. + for (i = 0; i < 4 && i < ivsize / 4; i++) {
  150. + v = *(u32 *)(areq->info + i * 4);
  151. + writel(v, ss->base + SS_IV0 + i * 4);
  152. + }
  153. + }
  154. + writel(op->mode, ss->base + SS_CTL);
  155. +
  156. + /* If we have only one SG, we can use kmap_atomic */
  157. + if (sg_next(in_sg) == NULL && sg_next(out_sg) == NULL) {
  158. + src_addr = kmap_atomic(sg_page(in_sg)) + in_sg->offset;
  159. + if (src_addr == NULL) {
  160. + dev_err(ss->dev, "kmap_atomic error for src SG\n");
  161. + writel(0, ss->base + SS_CTL);
  162. + mutex_unlock(&ss->lock);
  163. + return -EINVAL;
  164. + }
  165. + dst_addr = kmap_atomic(sg_page(out_sg)) + out_sg->offset;
  166. + if (dst_addr == NULL) {
  167. + dev_err(ss->dev, "kmap_atomic error for dst SG\n");
  168. + writel(0, ss->base + SS_CTL);
  169. + kunmap_atomic(src_addr);
  170. + mutex_unlock(&ss->lock);
  171. + return -EINVAL;
  172. + }
  173. + src32 = (u32 *)src_addr;
  174. + dst32 = (u32 *)dst_addr;
  175. + ileft = areq->nbytes / 4;
  176. + oleft = areq->nbytes / 4;
  177. + i = 0;
  178. + do {
  179. + if (ileft > 0 && rx_cnt > 0) {
  180. + todo = min(rx_cnt, ileft);
  181. + ileft -= todo;
  182. + do {
  183. + writel_relaxed(*src32++,
  184. + ss->base +
  185. + SS_RXFIFO);
  186. + todo--;
  187. + } while (todo > 0);
  188. + }
  189. + if (tx_cnt > 0) {
  190. + todo = min(tx_cnt, oleft);
  191. + oleft -= todo;
  192. + do {
  193. + *dst32++ = readl_relaxed(ss->base +
  194. + SS_TXFIFO);
  195. + todo--;
  196. + } while (todo > 0);
  197. + }
  198. + spaces = readl_relaxed(ss->base + SS_FCSR);
  199. + rx_cnt = SS_RXFIFO_SPACES(spaces);
  200. + tx_cnt = SS_TXFIFO_SPACES(spaces);
  201. + } while (oleft > 0);
  202. + writel(0, ss->base + SS_CTL);
  203. + kunmap_atomic(src_addr);
  204. + kunmap_atomic(dst_addr);
  205. + mutex_unlock(&ss->lock);
  206. + return 0;
  207. + }
  208. +
  209. + /* If we have more than one SG, we cannot use kmap_atomic since
  210. + * we would hold the mapping for too long
  211. + */
  212. + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
  213. + if (src_addr == NULL) {
  214. + dev_err(ss->dev, "KMAP error for src SG\n");
  215. + mutex_unlock(&ss->lock);
  216. + return -EINVAL;
  217. + }
  218. + dst_addr = kmap(sg_page(out_sg)) + out_sg->offset;
  219. + if (dst_addr == NULL) {
  220. + kunmap(sg_page(in_sg));
  221. + dev_err(ss->dev, "KMAP error for dst SG\n");
  222. + mutex_unlock(&ss->lock);
  223. + return -EINVAL;
  224. + }
  225. + src32 = (u32 *)src_addr;
  226. + dst32 = (u32 *)dst_addr;
  227. + ileft = areq->nbytes / 4;
  228. + oleft = areq->nbytes / 4;
  229. + sgileft = in_sg->length / 4;
  230. + sgoleft = out_sg->length / 4;
  231. + do {
  232. + spaces = readl_relaxed(ss->base + SS_FCSR);
  233. + rx_cnt = SS_RXFIFO_SPACES(spaces);
  234. + tx_cnt = SS_TXFIFO_SPACES(spaces);
  235. + todo = min3(rx_cnt, ileft, sgileft);
  236. + if (todo > 0) {
  237. + ileft -= todo;
  238. + sgileft -= todo;
  239. + }
  240. + while (todo > 0) {
  241. + writel_relaxed(*src32++, ss->base + SS_RXFIFO);
  242. + todo--;
  243. + }
  244. + if (in_sg != NULL && sgileft == 0 && ileft > 0) {
  245. + kunmap(sg_page(in_sg));
  246. + in_sg = sg_next(in_sg);
  247. + while (in_sg != NULL && in_sg->length == 0)
  248. + in_sg = sg_next(in_sg);
  249. + if (in_sg != NULL && ileft > 0) {
  250. + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
  251. + if (src_addr == NULL) {
  252. + dev_err(ss->dev, "ERROR: KMAP for src SG\n");
  253. + mutex_unlock(&ss->lock);
  254. + return -EINVAL;
  255. + }
  256. + src32 = src_addr;
  257. + sgileft = in_sg->length / 4;
  258. + }
  259. + }
  260. + /* no need to test oleft: when oleft == 0 we have finished */
  261. + todo = min3(tx_cnt, oleft, sgoleft);
  262. + if (todo > 0) {
  263. + oleft -= todo;
  264. + sgoleft -= todo;
  265. + }
  266. + while (todo > 0) {
  267. + *dst32++ = readl_relaxed(ss->base + SS_TXFIFO);
  268. + todo--;
  269. + }
  270. + if (out_sg != NULL && sgoleft == 0 && oleft >= 0) {
  271. + kunmap(sg_page(out_sg));
  272. + out_sg = sg_next(out_sg);
  273. + while (out_sg != NULL && out_sg->length == 0)
  274. + out_sg = sg_next(out_sg);
  275. + if (out_sg != NULL && oleft > 0) {
  276. + dst_addr = kmap(sg_page(out_sg)) +
  277. + out_sg->offset;
  278. + if (dst_addr == NULL) {
  279. + dev_err(ss->dev, "KMAP error\n");
  280. + mutex_unlock(&ss->lock);
  281. + return -EINVAL;
  282. + }
  283. + dst32 = dst_addr;
  284. + sgoleft = out_sg->length / 4;
  285. + }
  286. + }
  287. + } while (oleft > 0);
  288. +
  289. + writel(0, ss->base + SS_CTL);
  290. + mutex_unlock(&ss->lock);
  291. + return 0;
  292. +}
  293. +
  294. +/* Pure CPU way of doing DES/3DES with the SS.
  295. + * Since DES and 3DES SGs could be smaller than 4 bytes, I use sg_copy_to_buffer
  296. + * to linearize them.
  297. + * The drawback is that I allocate (2 x areq->nbytes) for buf_in/buf_out.
  298. + * TODO: change this scheme
  299. + * SGsrc -> buf_in -> SS -> buf_out -> SGdst */
  300. +int sunxi_ss_des_poll(struct ablkcipher_request *areq)
  301. +{
  302. + u32 value, spaces;
  303. + size_t nb_in_sg_tx, nb_in_sg_rx;
  304. + size_t ir, it;
  305. + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
  306. + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
  307. + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
  308. + u32 tx_cnt = 0;
  309. + u32 rx_cnt = 0;
  310. + u32 v;
  311. + int i;
  312. + int no_chunk = 1;
  313. +
  314. + /* if all SGs have a size that is a multiple of 4,
  315. + * we can use the SS AES function */
  316. + struct scatterlist *in_sg;
  317. + struct scatterlist *out_sg;
  318. +
  319. + in_sg = areq->src;
  320. + out_sg = areq->dst;
  321. +
  322. + while (in_sg != NULL && no_chunk == 1) {
  323. + if ((in_sg->length % 4) != 0)
  324. + no_chunk = 0;
  325. + in_sg = sg_next(in_sg);
  326. + }
  327. + while (out_sg != NULL && no_chunk == 1) {
  328. + if ((out_sg->length % 4) != 0)
  329. + no_chunk = 0;
  330. + out_sg = sg_next(out_sg);
  331. + }
  332. +
  333. + if (no_chunk == 1)
  334. + return sunxi_ss_aes_poll(areq);
  335. + in_sg = areq->src;
  336. + out_sg = areq->dst;
  337. +
  338. + nb_in_sg_rx = sg_nents(in_sg);
  339. + nb_in_sg_tx = sg_nents(out_sg);
  340. +
  341. + mutex_lock(&ss->bufin_lock);
  342. + if (ss->buf_in == NULL) {
  343. + ss->buf_in = kmalloc(areq->nbytes, GFP_KERNEL);
  344. + ss->buf_in_size = areq->nbytes;
  345. + } else {
  346. + if (areq->nbytes > ss->buf_in_size) {
  347. + kfree(ss->buf_in);
  348. + ss->buf_in = kmalloc(areq->nbytes, GFP_KERNEL);
  349. + ss->buf_in_size = areq->nbytes;
  350. + }
  351. + }
  352. + if (ss->buf_in == NULL) {
  353. + ss->buf_in_size = 0;
  354. + mutex_unlock(&ss->bufin_lock);
  355. + dev_err(ss->dev, "Unable to allocate pages.\n");
  356. + return -ENOMEM;
  357. + }
  358. + if (ss->buf_out == NULL) {
  359. + mutex_lock(&ss->bufout_lock);
  360. + ss->buf_out = kmalloc(areq->nbytes, GFP_KERNEL);
  361. + if (ss->buf_out == NULL) {
  362. + ss->buf_out_size = 0;
  363. + mutex_unlock(&ss->bufout_lock);
  364. + dev_err(ss->dev, "Unable to allocate pages.\n");
  365. + return -ENOMEM;
  366. + }
  367. + ss->buf_out_size = areq->nbytes;
  368. + mutex_unlock(&ss->bufout_lock);
  369. + } else {
  370. + if (areq->nbytes > ss->buf_out_size) {
  371. + mutex_lock(&ss->bufout_lock);
  372. + kfree(ss->buf_out);
  373. + ss->buf_out = kmalloc(areq->nbytes, GFP_KERNEL);
  374. + if (ss->buf_out == NULL) {
  375. + ss->buf_out_size = 0;
  376. + mutex_unlock(&ss->bufout_lock);
  377. + dev_err(ss->dev, "Unable to allocate pages.\n");
  378. + return -ENOMEM;
  379. + }
  380. + ss->buf_out_size = areq->nbytes;
  381. + mutex_unlock(&ss->bufout_lock);
  382. + }
  383. + }
  384. +
  385. + sg_copy_to_buffer(areq->src, nb_in_sg_rx, ss->buf_in, areq->nbytes);
  386. +
  387. + ir = 0;
  388. + it = 0;
  389. +
  390. + for (i = 0; i < op->keylen; i += 4)
  391. + writel(*(op->key + i/4), ss->base + SS_KEY0 + i);
  392. + if (areq->info != NULL) {
  393. + for (i = 0; i < 4 && i < ivsize / 4; i++) {
  394. + v = *(u32 *)(areq->info + i * 4);
  395. + writel(v, ss->base + SS_IV0 + i * 4);
  396. + }
  397. + }
  398. + writel(op->mode, ss->base + SS_CTL);
  399. +
  400. + do {
  401. + if (rx_cnt == 0 || tx_cnt == 0) {
  402. + spaces = readl(ss->base + SS_FCSR);
  403. + rx_cnt = SS_RXFIFO_SPACES(spaces);
  404. + tx_cnt = SS_TXFIFO_SPACES(spaces);
  405. + }
  406. + if (rx_cnt > 0 && ir < areq->nbytes) {
  407. + do {
  408. + value = *(u32 *)(ss->buf_in + ir);
  409. + writel(value, ss->base + SS_RXFIFO);
  410. + ir += 4;
  411. + rx_cnt--;
  412. + } while (rx_cnt > 0 && ir < areq->nbytes);
  413. + }
  414. + if (tx_cnt > 0 && it < areq->nbytes) {
  415. + do {
  416. + value = readl(ss->base + SS_TXFIFO);
  417. + *(u32 *)(ss->buf_out + it) = value;
  418. + it += 4;
  419. + tx_cnt--;
  420. + } while (tx_cnt > 0 && it < areq->nbytes);
  421. + }
  422. + if (ir == areq->nbytes) {
  423. + mutex_unlock(&ss->bufin_lock);
  424. + ir++;
  425. + }
  426. + } while (it < areq->nbytes);
  427. +
  428. + writel(0, ss->base + SS_CTL);
  429. + mutex_unlock(&ss->lock);
  430. +
  431. + /* a simple optimization: since we don't need the hardware for this copy,
  432. + * we release the lock and then do the copy. This gains 5-10% in performance */
  433. + mutex_lock(&ss->bufout_lock);
  434. + sg_copy_from_buffer(areq->dst, nb_in_sg_tx, ss->buf_out, areq->nbytes);
  435. +
  436. + mutex_unlock(&ss->bufout_lock);
  437. + return 0;
  438. +}
  439. +
  440. +/* check and set the AES key, prepare the mode to be used */
  441. +int sunxi_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  442. + unsigned int keylen)
  443. +{
  444. + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
  445. +
  446. + switch (keylen) {
  447. + case 128 / 8:
  448. + op->mode = SS_AES_128BITS;
  449. + break;
  450. + case 192 / 8:
  451. + op->mode = SS_AES_192BITS;
  452. + break;
  453. + case 256 / 8:
  454. + op->mode = SS_AES_256BITS;
  455. + break;
  456. + default:
  457. + dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
  458. + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  459. + mutex_unlock(&ss->lock);
  460. + return -EINVAL;
  461. + }
  462. + op->keylen = keylen;
  463. + memcpy(op->key, key, keylen);
  464. + return 0;
  465. +}
  466. +
  467. +/* check and set the DES key, prepare the mode to be used */
  468. +int sunxi_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  469. + unsigned int keylen)
  470. +{
  471. + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
  472. +
  473. + if (keylen != DES_KEY_SIZE) {
  474. + dev_err(ss->dev, "Invalid keylen %u\n", keylen);
  475. + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  476. + mutex_unlock(&ss->lock);
  477. + return -EINVAL;
  478. + }
  479. + op->keylen = keylen;
  480. + memcpy(op->key, key, keylen);
  481. + return 0;
  482. +}
  483. +
  484. +/* check and set the 3DES key, prepare the mode to be used */
  485. +int sunxi_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  486. + unsigned int keylen)
  487. +{
  488. + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
  489. +
  490. + if (keylen != 3 * DES_KEY_SIZE) {
  491. + dev_err(ss->dev, "Invalid keylen %u\n", keylen);
  492. + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  493. + mutex_unlock(&ss->lock);
  494. + return -EINVAL;
  495. + }
  496. + op->keylen = keylen;
  497. + memcpy(op->key, key, keylen);
  498. + return 0;
  499. +}
  500. --- /dev/null
  501. +++ b/drivers/crypto/sunxi-ss/sunxi-ss-core.c
  502. @@ -0,0 +1,308 @@
  503. +/*
  504. + * sunxi-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC
  505. + *
  506. + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
  507. + *
  508. + * Core file which registers crypto algorithms supported by the SS.
  509. + *
  510. + * You can find the datasheet at
  511. + * http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
  512. + *
  513. + *
  514. + * This program is free software; you can redistribute it and/or modify
  515. + * it under the terms of the GNU General Public License as published by
  516. + * the Free Software Foundation; either version 2 of the License, or
  517. + * (at your option) any later version.
  518. + */
  519. +#include <linux/clk.h>
  520. +#include <linux/crypto.h>
  521. +#include <linux/io.h>
  522. +#include <linux/module.h>
  523. +#include <linux/of.h>
  524. +#include <linux/platform_device.h>
  525. +#include <crypto/scatterwalk.h>
  526. +#include <linux/scatterlist.h>
  527. +#include <linux/interrupt.h>
  528. +#include <linux/delay.h>
  529. +
  530. +#include "sunxi-ss.h"
  531. +
  532. +struct sunxi_ss_ctx *ss;
  533. +
  534. +/* General notes:
  535. + * I cannot use a key/IV cache because each time one of these changes,
  536. + * everything needs to be rewritten (rewrite SS_KEYX and SS_IVX).
  537. + * For example, with dm-crypt the IV changes on each request.
  538. + *
  539. + * After each request the device must be disabled by writing 0 to SS_CTL.
  540. + *
  541. + * For performance reasons, we use writel_relaxed/readl_relaxed for all
  542. + * operations on the RX and TX FIFOs and also on SS_FCSR.
  543. + * For all other registers, we use writel/readl.
  544. + * See http://permalink.gmane.org/gmane.linux.ports.arm.kernel/117644
  545. + * and http://permalink.gmane.org/gmane.linux.ports.arm.kernel/117640
  546. + */
  547. +
  548. +static struct ahash_alg sunxi_md5_alg = {
  549. + .init = sunxi_hash_init,
  550. + .update = sunxi_hash_update,
  551. + .final = sunxi_hash_final,
  552. + .finup = sunxi_hash_finup,
  553. + .digest = sunxi_hash_digest,
  554. + .halg = {
  555. + .digestsize = MD5_DIGEST_SIZE,
  556. + .base = {
  557. + .cra_name = "md5",
  558. + .cra_driver_name = "md5-sunxi-ss",
  559. + .cra_priority = 300,
  560. + .cra_alignmask = 3,
  561. + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
  562. + .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
  563. + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
  564. + .cra_module = THIS_MODULE,
  565. + .cra_type = &crypto_ahash_type
  566. + }
  567. + }
  568. +};
  569. +static struct ahash_alg sunxi_sha1_alg = {
  570. + .init = sunxi_hash_init,
  571. + .update = sunxi_hash_update,
  572. + .final = sunxi_hash_final,
  573. + .finup = sunxi_hash_finup,
  574. + .digest = sunxi_hash_digest,
  575. + .halg = {
  576. + .digestsize = SHA1_DIGEST_SIZE,
  577. + .base = {
  578. + .cra_name = "sha1",
  579. + .cra_driver_name = "sha1-sunxi-ss",
  580. + .cra_priority = 300,
  581. + .cra_alignmask = 3,
  582. + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
  583. + .cra_blocksize = SHA1_BLOCK_SIZE,
  584. + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
  585. + .cra_module = THIS_MODULE,
  586. + .cra_type = &crypto_ahash_type
  587. + }
  588. + }
  589. +};
  590. +
  591. +static struct crypto_alg sunxi_cipher_algs[] = {
  592. +{
  593. + .cra_name = "cbc(aes)",
  594. + .cra_driver_name = "cbc-aes-sunxi-ss",
  595. + .cra_priority = 300,
  596. + .cra_blocksize = AES_BLOCK_SIZE,
  597. + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
  598. + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
  599. + .cra_module = THIS_MODULE,
  600. + .cra_alignmask = 3,
  601. + .cra_type = &crypto_ablkcipher_type,
  602. + .cra_init = sunxi_ss_cipher_init,
  603. + .cra_u = {
  604. + .ablkcipher = {
  605. + .min_keysize = AES_MIN_KEY_SIZE,
  606. + .max_keysize = AES_MAX_KEY_SIZE,
  607. + .ivsize = AES_BLOCK_SIZE,
  608. + .setkey = sunxi_ss_aes_setkey,
  609. + .encrypt = sunxi_ss_cipher_encrypt,
  610. + .decrypt = sunxi_ss_cipher_decrypt,
  611. + }
  612. + }
  613. +}, {
  614. + .cra_name = "cbc(des)",
  615. + .cra_driver_name = "cbc-des-sunxi-ss",
  616. + .cra_priority = 300,
  617. + .cra_blocksize = DES_BLOCK_SIZE,
  618. + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
  619. + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
  620. + .cra_module = THIS_MODULE,
  621. + .cra_alignmask = 3,
  622. + .cra_type = &crypto_ablkcipher_type,
  623. + .cra_init = sunxi_ss_cipher_init,
  624. + .cra_u.ablkcipher = {
  625. + .min_keysize = DES_KEY_SIZE,
  626. + .max_keysize = DES_KEY_SIZE,
  627. + .ivsize = DES_BLOCK_SIZE,
  628. + .setkey = sunxi_ss_des_setkey,
  629. + .encrypt = sunxi_ss_cipher_encrypt,
  630. + .decrypt = sunxi_ss_cipher_decrypt,
  631. + }
  632. +}, {
  633. + .cra_name = "cbc(des3_ede)",
  634. + .cra_driver_name = "cbc-des3-sunxi-ss",
  635. + .cra_priority = 300,
  636. + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  637. + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
  638. + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
  639. + .cra_module = THIS_MODULE,
  640. + .cra_alignmask = 3,
  641. + .cra_type = &crypto_ablkcipher_type,
  642. + .cra_init = sunxi_ss_cipher_init,
  643. + .cra_u.ablkcipher = {
  644. + .min_keysize = DES3_EDE_KEY_SIZE,
  645. + .max_keysize = DES3_EDE_KEY_SIZE,
  646. + .ivsize = DES3_EDE_BLOCK_SIZE,
  647. + .setkey = sunxi_ss_des3_setkey,
  648. + .encrypt = sunxi_ss_cipher_encrypt,
  649. + .decrypt = sunxi_ss_cipher_decrypt,
  650. + }
  651. +}
  652. +};
  653. +
  654. +static int sunxi_ss_probe(struct platform_device *pdev)
  655. +{
  656. + struct resource *res;
  657. + u32 v;
  658. + int err;
  659. + unsigned long cr;
  660. + const unsigned long cr_ahb = 24 * 1000 * 1000;
  661. + const unsigned long cr_mod = 150 * 1000 * 1000;
  662. +
  663. + if (!pdev->dev.of_node)
  664. + return -ENODEV;
  665. +
  666. + ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
  667. + if (ss == NULL)
  668. + return -ENOMEM;
  669. +
  670. + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  671. + ss->base = devm_ioremap_resource(&pdev->dev, res);
  672. + if (IS_ERR(ss->base)) {
  673. + dev_err(&pdev->dev, "Cannot request MMIO\n");
  674. + return PTR_ERR(ss->base);
  675. + }
  676. +
  677. + ss->ssclk = devm_clk_get(&pdev->dev, "mod");
  678. + if (IS_ERR(ss->ssclk)) {
  679. + err = PTR_ERR(ss->ssclk);
  680. + dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err);
  681. + return err;
  682. + }
  683. + dev_dbg(&pdev->dev, "clock ss acquired\n");
  684. +
  685. + ss->busclk = devm_clk_get(&pdev->dev, "ahb");
  686. + if (IS_ERR(ss->busclk)) {
  687. + err = PTR_ERR(ss->busclk);
  688. + dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err);
  689. + return err;
  690. + }
  691. + dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
  692. +
  693. + /* Enable the clocks */
  694. + err = clk_prepare_enable(ss->busclk);
  695. + if (err != 0) {
  696. + dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
  697. + return err;
  698. + }
  699. + err = clk_prepare_enable(ss->ssclk);
  700. + if (err != 0) {
  701. + dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
  702. + clk_disable_unprepare(ss->busclk);
  703. + return err;
  704. + }
  705. +
  706. + /* Check that the clocks have the correct rates given in the datasheet */
  707. + /* Try to set the clock to the maximum allowed */
  708. + err = clk_set_rate(ss->ssclk, cr_mod);
  709. + if (err != 0) {
  710. + dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
  711. + clk_disable_unprepare(ss->ssclk);
  712. + clk_disable_unprepare(ss->busclk);
  713. + return err;
  714. + }
  715. + cr = clk_get_rate(ss->busclk);
  716. + if (cr >= cr_ahb)
  717. + dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
  718. + cr, cr / 1000000, cr_ahb);
  719. + else
  720. + dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
  721. + cr, cr / 1000000, cr_ahb);
  722. + cr = clk_get_rate(ss->ssclk);
  723. + if (cr == cr_mod)
  724. + dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
  725. + cr, cr / 1000000, cr_mod);
  726. + else {
  727. + dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
  728. + cr, cr / 1000000, cr_mod);
  729. + }
  730. +
  731. + /* TODO: could this information be useful? */
  732. + writel(SS_ENABLED, ss->base + SS_CTL);
  733. + v = readl(ss->base + SS_CTL);
  734. + v >>= 16;
  735. + v &= 0x07;
  736. + dev_info(&pdev->dev, "Die ID %d\n", v);
  737. + writel(0, ss->base + SS_CTL);
  738. +
  739. + ss->dev = &pdev->dev;
  740. +
  741. + mutex_init(&ss->lock);
  742. + mutex_init(&ss->bufin_lock);
  743. + mutex_init(&ss->bufout_lock);
  744. +
  745. + err = crypto_register_ahash(&sunxi_md5_alg);
  746. + if (err)
  747. + goto error_md5;
  748. + err = crypto_register_ahash(&sunxi_sha1_alg);
  749. + if (err)
  750. + goto error_sha1;
  751. + err = crypto_register_algs(sunxi_cipher_algs,
  752. + ARRAY_SIZE(sunxi_cipher_algs));
  753. + if (err)
  754. + goto error_ciphers;
  755. +
  756. + return 0;
  757. +error_ciphers:
  758. + crypto_unregister_ahash(&sunxi_sha1_alg);
  759. +error_sha1:
  760. + crypto_unregister_ahash(&sunxi_md5_alg);
  761. +error_md5:
  762. + clk_disable_unprepare(ss->ssclk);
  763. + clk_disable_unprepare(ss->busclk);
  764. + return err;
  765. +}
  766. +
  767. +static int __exit sunxi_ss_remove(struct platform_device *pdev)
  768. +{
  769. + if (!pdev->dev.of_node)
  770. + return 0;
  771. +
  772. + crypto_unregister_ahash(&sunxi_md5_alg);
  773. + crypto_unregister_ahash(&sunxi_sha1_alg);
  774. + crypto_unregister_algs(sunxi_cipher_algs,
  775. + ARRAY_SIZE(sunxi_cipher_algs));
  776. +
  777. + if (ss->buf_in != NULL)
  778. + kfree(ss->buf_in);
  779. + if (ss->buf_out != NULL)
  780. + kfree(ss->buf_out);
  781. +
  782. + writel(0, ss->base + SS_CTL);
  783. + clk_disable_unprepare(ss->busclk);
  784. + clk_disable_unprepare(ss->ssclk);
  785. + return 0;
  786. +}
  787. +
  788. +/*============================================================================*/
  789. +/*============================================================================*/
  790. +static const struct of_device_id a20ss_crypto_of_match_table[] = {
  791. + { .compatible = "allwinner,sun7i-a20-crypto" },
  792. + {}
  793. +};
  794. +MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
  795. +
  796. +static struct platform_driver sunxi_ss_driver = {
  797. + .probe = sunxi_ss_probe,
  798. + .remove = __exit_p(sunxi_ss_remove),
  799. + .driver = {
  800. + .owner = THIS_MODULE,
  801. + .name = "sunxi-ss",
  802. + .of_match_table = a20ss_crypto_of_match_table,
  803. + },
  804. +};
  805. +
  806. +module_platform_driver(sunxi_ss_driver);
  807. +
  808. +MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
  809. +MODULE_LICENSE("GPL");
  810. +MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
  811. --- /dev/null
  812. +++ b/drivers/crypto/sunxi-ss/sunxi-ss-hash.c
  813. @@ -0,0 +1,241 @@
  814. +/*
  815. + * sunxi-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
  816. + *
  817. + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
  818. + *
  819. + * This file adds support for MD5 and SHA1.
  820. + *
  821. + * You can find the datasheet at
  822. + * http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
  823. + *
  824. + * This program is free software; you can redistribute it and/or modify
  825. + * it under the terms of the GNU General Public License as published by
  826. + * the Free Software Foundation; either version 2 of the License, or
  827. + * (at your option) any later version.
  828. + */
  829. +#include "sunxi-ss.h"
  830. +
  831. +extern struct sunxi_ss_ctx *ss;
  832. +
  833. +/* sunxi_hash_init: initialize request context
  834. + * Activate the SS, and configure it for MD5 or SHA1
  835. + */
  836. +int sunxi_hash_init(struct ahash_request *areq)
  837. +{
  838. + const char *hash_type;
  839. + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  840. + struct sunxi_req_ctx *op = crypto_ahash_ctx(tfm);
  841. +
  842. + mutex_lock(&ss->lock);
  843. +
  844. + hash_type = crypto_tfm_alg_name(areq->base.tfm);
  845. +
  846. + op->byte_count = 0;
  847. + op->nbwait = 0;
  848. + op->waitbuf = 0;
  849. +
  850. + /* Enable and configure SS for MD5 or SHA1 */
  851. + if (strcmp(hash_type, "sha1") == 0)
  852. + op->mode = SS_OP_SHA1;
  853. + else
  854. + op->mode = SS_OP_MD5;
  855. +
  856. + writel(op->mode | SS_ENABLED, ss->base + SS_CTL);
  857. + return 0;
  858. +}
  859. +
  860. +/*
  861. + * sunxi_hash_update: update hash engine
  862. + *
  863. + * Can be used for both SHA1 and MD5.
  864. + * Write data in 32-bit steps and push them into the SS.
  865. + * The remaining bytes (nbwait of them) are stored in op->waitbuf.
  866. + * As an optimisation, we do not check RXFIFO_SPACES, since the SS handles
  867. + * the FIFO faster than we can write to it
  868. + */
  869. +int sunxi_hash_update(struct ahash_request *areq)
  870. +{
  871. + u32 v;
  872. + unsigned int i = 0;/* bytes read, to be compared to areq->nbytes */
  873. + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  874. + struct sunxi_req_ctx *op = crypto_ahash_ctx(tfm);
  875. + struct scatterlist *in_sg;
  876. + unsigned int in_i = 0;/* advancement in the current SG */
  877. + void *src_addr;
  878. +
  879. + u8 *waitbuf = (u8 *)(&op->waitbuf);
  880. +
  881. + if (areq->nbytes == 0)
  882. + return 0;
  883. +
  884. + in_sg = areq->src;
  885. + do {
  886. + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
  887. + /* step 1: if some bytes remain from the last SG,
  888. + * try to complete them to 4 and send them */
  889. + if (op->nbwait > 0) {
  890. + while (op->nbwait < 4 && i < areq->nbytes &&
  891. + in_i < in_sg->length) {
  892. + waitbuf[op->nbwait] = *(u8 *)(src_addr + in_i);
  893. + i++;
  894. + in_i++;
  895. + op->nbwait++;
  896. + }
  897. + if (op->nbwait == 4) {
  898. + writel(op->waitbuf, ss->base + SS_RXFIFO);
  899. + op->byte_count += 4;
  900. + op->nbwait = 0;
  901. + op->waitbuf = 0;
  902. + }
  903. + }
  904. + /* step 2: main loop, read data 4 bytes at a time */
  905. + while (i < areq->nbytes && areq->nbytes - i >= 4 &&
  906. + in_i < in_sg->length &&
  907. + in_sg->length - in_i >= 4) {
  908. + v = *(u32 *)(src_addr + in_i);
  909. + writel_relaxed(v, ss->base + SS_RXFIFO);
  910. + i += 4;
  911. + op->byte_count += 4;
  912. + in_i += 4;
  913. + }
  914. + /* step 3: if fewer than 4 bytes remain, copy them into waitbuf;
  915. + * no need to check for op->nbwait < 4 since we cannot have
  916. + * more than 4 bytes remaining */
  917. + if (in_i < in_sg->length && in_sg->length - in_i < 4 &&
  918. + i < areq->nbytes) {
  919. + do {
  920. + waitbuf[op->nbwait] = *(u8 *)(src_addr + in_i);
  921. + op->nbwait++;
  922. + in_i++;
  923. + i++;
  924. + } while (in_i < in_sg->length && i < areq->nbytes);
  925. + }
  926. + /* we have finished the current SG, move to the next one */
  927. + kunmap(sg_page(in_sg));
  928. + in_sg = sg_next(in_sg);
  929. + in_i = 0;
  930. + } while (in_sg != NULL && i < areq->nbytes);
  931. + return 0;
  932. +}
  933. +
  934. +/*
  935. + * sunxi_hash_final: finalize hashing operation
  936. + *
  937. + * If some bytes remain, send them.
  938. + * Then ask the SS to finalize the hash
  939. + */
  940. +int sunxi_hash_final(struct ahash_request *areq)
  941. +{
  942. + u32 v;
  943. + unsigned int i;
  944. + int zeros;
  945. + unsigned int index, padlen;
  946. + __be64 bits;
  947. + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  948. + struct sunxi_req_ctx *op = crypto_ahash_ctx(tfm);
  949. +
  950. + if (op->nbwait > 0) {
  951. + op->waitbuf |= ((1 << 7) << (op->nbwait * 8));
  952. + writel(op->waitbuf, ss->base + SS_RXFIFO);
  953. + } else {
  954. + writel((1 << 7), ss->base + SS_RXFIFO);
  955. + }
  956. +
  957. + /* Number of 32-bit zero words needed to pad up to 64 bytes,
  958. + * minus 8 bytes for the size and minus 4 bytes for the word
  959. + * carrying the final '1' bit (i.e. pad up to 56 bytes modulo 64).
  960. + */
  961. +
  962. + /* we have already sent 4 more bytes, of which nbwait are data */
  963. + if (op->mode == SS_OP_MD5) {
  964. + index = (op->byte_count + 4) & 0x3f;
  965. + op->byte_count += op->nbwait;
  966. + if (index > 56)
  967. + zeros = (120 - index) / 4;
  968. + else
  969. + zeros = (56 - index) / 4;
  970. + } else {
  971. + op->byte_count += op->nbwait;
  972. + index = op->byte_count & 0x3f;
  973. + padlen = (index < 56) ? (56 - index) : ((64+56) - index);
  974. + zeros = (padlen - 1) / 4;
  975. + }
  976. + for (i = 0; i < zeros; i++)
  977. + writel(0, ss->base + SS_RXFIFO);
  978. +
  979. + /* write the length */
  980. + if (op->mode == SS_OP_SHA1) {
  981. + bits = cpu_to_be64(op->byte_count << 3);
  982. + writel(bits & 0xffffffff, ss->base + SS_RXFIFO);
  983. + writel((bits >> 32) & 0xffffffff, ss->base + SS_RXFIFO);
  984. + } else {
  985. + writel((op->byte_count << 3) & 0xffffffff,
  986. + ss->base + SS_RXFIFO);
  987. + writel((op->byte_count >> 29) & 0xffffffff,
  988. + ss->base + SS_RXFIFO);
  989. + }
  990. +
  991. + /* stop the hashing */
  992. + v = readl(ss->base + SS_CTL);
  993. + v |= SS_DATA_END;
  994. + writel(v, ss->base + SS_CTL);
  995. +
  996. + /* check the end */
  997. + /* The timeout can only happen in case of bad overclocking */
  998. +#define SS_TIMEOUT 100
  999. + i = 0;
  1000. + do {
  1001. + v = readl(ss->base + SS_CTL);
  1002. + i++;
  1003. + } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
  1004. + if (i >= SS_TIMEOUT) {
  1005. + dev_err(ss->dev, "ERROR: hash end timeout %d>%d\n",
  1006. + i, SS_TIMEOUT);
  1007. + writel(0, ss->base + SS_CTL);
  1008. + mutex_unlock(&ss->lock);
  1009. + return -1;
  1010. + }
  1011. +
  1012. + if (op->mode == SS_OP_SHA1) {
  1013. + for (i = 0; i < 5; i++) {
  1014. + v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
  1015. + memcpy(areq->result + i * 4, &v, 4);
  1016. + }
  1017. + } else {
  1018. + for (i = 0; i < 4; i++) {
  1019. + v = readl(ss->base + SS_MD0 + i * 4);
  1020. + memcpy(areq->result + i * 4, &v, 4);
  1021. + }
  1022. + }
  1023. + writel(0, ss->base + SS_CTL);
  1024. + mutex_unlock(&ss->lock);
  1025. + return 0;
  1026. +}
  1027. +
  1028. +/* sunxi_hash_finup: finalize hashing operation after an update */
  1029. +int sunxi_hash_finup(struct ahash_request *areq)
  1030. +{
  1031. + int err;
  1032. +
  1033. + err = sunxi_hash_update(areq);
  1034. + if (err != 0)
  1035. + return err;
  1036. +
  1037. + return sunxi_hash_final(areq);
  1038. +}
  1039. +
  1040. +/* combo of init/update/final functions */
  1041. +int sunxi_hash_digest(struct ahash_request *areq)
  1042. +{
  1043. + int err;
  1044. +
  1045. + err = sunxi_hash_init(areq);
  1046. + if (err != 0)
  1047. + return err;
  1048. +
  1049. + err = sunxi_hash_update(areq);
  1050. + if (err != 0)
  1051. + return err;
  1052. +
  1053. + return sunxi_hash_final(areq);
  1054. +}
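
The zero-word padding computed in sunxi_hash_final above can be hard to follow from the comments alone. The following standalone userspace sketch only mirrors the patch's own arithmetic (the function names md5_zero_words and sha1_zero_words are illustrative, not part of the driver); it assumes the message was pushed 4 bytes at a time, so byte_count is the length rounded down to a multiple of 4 and nbwait is the remainder.

/*
 * Standalone sketch of the zero padding computed in sunxi_hash_final.
 * It reproduces the driver's computation for a message of `len` bytes;
 * compile and run it to see how many zero words are pushed.
 */
#include <stdio.h>

/* MD5 branch: index is taken before nbwait is folded into byte_count,
 * and the "+ 4" accounts for the word carrying the final 0x80 marker. */
static unsigned int md5_zero_words(unsigned long long len)
{
	unsigned long long byte_count = len - (len % 4);
	unsigned int index = (byte_count + 4) & 0x3f;

	return (index > 56 ? 120 - index : 56 - index) / 4;
}

/* SHA1 branch: nbwait is folded in first, then the pad length in bytes
 * is reduced by one word for the 0x80 marker already written. */
static unsigned int sha1_zero_words(unsigned long long len)
{
	unsigned int index = len & 0x3f;
	unsigned int padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return (padlen - 1) / 4;
}

int main(void)
{
	unsigned long long lens[] = { 0, 1, 55, 56, 60, 64 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len=%llu: md5 zeros=%u sha1 zeros=%u\n",
		       lens[i], md5_zero_words(lens[i]),
		       sha1_zero_words(lens[i]));
	return 0;
}

For example, an empty MD5 message gets 13 zero words: the marker word (4 bytes), 52 bytes of zeros and the 8-byte length exactly fill one 64-byte block.
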
  1055. --- /dev/null
  1056. +++ b/drivers/crypto/sunxi-ss/sunxi-ss.h
  1057. @@ -0,0 +1,183 @@
  1058. +/*
  1059. + * sunxi-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC
  1060. + *
  1061. + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
  1062. + *
  1063. + * Supports the AES cipher with 128, 192 and 256-bit keysizes.
  1064. + * Supports the MD5 and SHA1 hash algorithms.
  1065. + * Supports DES and 3DES.
  1066. + * Supports PRNG.
  1067. + *
  1068. + * You can find the datasheet at
  1069. + * http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
  1070. + *
  1071. + *
  1072. + * Licensed under the GPL-2.
  1073. + */
  1074. +
  1075. +#include <linux/clk.h>
  1076. +#include <linux/crypto.h>
  1077. +#include <linux/io.h>
  1078. +#include <linux/module.h>
  1079. +#include <linux/of.h>
  1080. +#include <linux/platform_device.h>
  1081. +#include <crypto/scatterwalk.h>
  1082. +#include <linux/scatterlist.h>
  1083. +#include <linux/interrupt.h>
  1084. +#include <linux/delay.h>
  1085. +#include <crypto/md5.h>
  1086. +#include <crypto/sha.h>
  1087. +#include <crypto/hash.h>
  1088. +#include <crypto/internal/hash.h>
  1089. +#include <crypto/aes.h>
  1090. +#include <crypto/des.h>
  1091. +#include <crypto/internal/rng.h>
  1092. +
  1093. +#define SS_CTL 0x00
  1094. +#define SS_KEY0 0x04
  1095. +#define SS_KEY1 0x08
  1096. +#define SS_KEY2 0x0C
  1097. +#define SS_KEY3 0x10
  1098. +#define SS_KEY4 0x14
  1099. +#define SS_KEY5 0x18
  1100. +#define SS_KEY6 0x1C
  1101. +#define SS_KEY7 0x20
  1102. +
  1103. +#define SS_IV0 0x24
  1104. +#define SS_IV1 0x28
  1105. +#define SS_IV2 0x2C
  1106. +#define SS_IV3 0x30
  1107. +
  1108. +#define SS_CNT0 0x34
  1109. +#define SS_CNT1 0x38
  1110. +#define SS_CNT2 0x3C
  1111. +#define SS_CNT3 0x40
  1112. +
  1113. +#define SS_FCSR 0x44
  1114. +#define SS_ICSR 0x48
  1115. +
  1116. +#define SS_MD0 0x4C
  1117. +#define SS_MD1 0x50
  1118. +#define SS_MD2 0x54
  1119. +#define SS_MD3 0x58
  1120. +#define SS_MD4 0x5C
  1121. +
  1122. +#define SS_RXFIFO 0x200
  1123. +#define SS_TXFIFO 0x204
  1124. +
  1125. +/* SS_CTL configuration values */
  1126. +
  1127. +/* PRNG generator mode - bit 15 */
  1128. +#define SS_PRNG_ONESHOT (0 << 15)
  1129. +#define SS_PRNG_CONTINUE (1 << 15)
  1130. +
  1131. +/* SS operation mode - bits 12-13 */
  1132. +#define SS_ECB (0 << 12)
  1133. +#define SS_CBC (1 << 12)
  1134. +#define SS_CNT (2 << 12)
  1135. +
  1136. +/* Counter width for CNT mode - bits 10-11 */
  1137. +#define SS_CNT_16BITS (0 << 10)
  1138. +#define SS_CNT_32BITS (1 << 10)
  1139. +#define SS_CNT_64BITS (2 << 10)
  1140. +
  1141. +/* Key size for AES - bits 8-9 */
  1142. +#define SS_AES_128BITS (0 << 8)
  1143. +#define SS_AES_192BITS (1 << 8)
  1144. +#define SS_AES_256BITS (2 << 8)
  1145. +
  1146. +/* Operation direction - bit 7 */
  1147. +#define SS_ENCRYPTION (0 << 7)
  1148. +#define SS_DECRYPTION (1 << 7)
  1149. +
  1150. +/* SS Method - bits 4-6 */
  1151. +#define SS_OP_AES (0 << 4)
  1152. +#define SS_OP_DES (1 << 4)
  1153. +#define SS_OP_3DES (2 << 4)
  1154. +#define SS_OP_SHA1 (3 << 4)
  1155. +#define SS_OP_MD5 (4 << 4)
  1156. +#define SS_OP_PRNG (5 << 4)
  1157. +
  1158. +/* Data end bit - bit 2 */
  1159. +#define SS_DATA_END (1 << 2)
  1160. +
  1161. +/* PRNG start bit - bit 1 */
  1162. +#define SS_PRNG_START (1 << 1)
  1163. +
  1164. +/* SS Enable bit - bit 0 */
  1165. +#define SS_DISABLED (0 << 0)
  1166. +#define SS_ENABLED (1 << 0)
  1167. +
  1168. +/* SS_FCSR configuration values */
  1169. +/* RX FIFO status - bit 30 */
  1170. +#define SS_RXFIFO_FREE (1 << 30)
  1171. +
  1172. +/* RX FIFO empty spaces - bits 24-29 */
  1173. +#define SS_RXFIFO_SPACES(val) (((val) >> 24) & 0x3f)
  1174. +
  1175. +/* TX FIFO status - bit 22 */
  1176. +#define SS_TXFIFO_AVAILABLE (1 << 22)
  1177. +
  1178. +/* TX FIFO available spaces - bits 16-21 */
  1179. +#define SS_TXFIFO_SPACES(val) (((val) >> 16) & 0x3f)
  1180. +
  1181. +#define SS_RXFIFO_EMP_INT_PENDING (1 << 10)
  1182. +#define SS_TXFIFO_AVA_INT_PENDING (1 << 8)
  1183. +#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2)
  1184. +#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0)
  1185. +
  1186. +/* SS_ICSR configuration values */
  1187. +#define SS_ICS_DRQ_ENABLE (1 << 4)
  1188. +
  1189. +struct sunxi_ss_ctx {
  1190. + void __iomem *base;
  1191. + int irq;
  1192. + struct clk *busclk;
  1193. + struct clk *ssclk;
  1194. + struct device *dev;
  1195. + struct resource *res;
  1196. + void *buf_in; /* pointer to data to be uploaded to the device */
  1197. + size_t buf_in_size; /* size of buf_in */
  1198. + void *buf_out;
  1199. + size_t buf_out_size;
  1200. + struct mutex lock; /* control the use of the device */
  1201. + struct mutex bufout_lock; /* control the use of buf_out */
  1202. + struct mutex bufin_lock; /* control the use of buf_in */
  1203. +};
  1204. +
  1205. +struct sunxi_req_ctx {
  1206. + u32 key[AES_MAX_KEY_SIZE / 4];/* divided by sizeof(u32) */
  1207. + u32 keylen;
  1208. + u32 mode;
  1209. + u64 byte_count; /* number of bytes "uploaded" to the device */
  1210. + u32 waitbuf; /* a partial word waiting to be completed and
  1211. + uploaded to the device */
  1212. + /* number of bytes to be uploaded in the waitbuf word */
  1213. + unsigned int nbwait;
  1214. +};
  1215. +
  1216. +#define SS_SEED_LEN (192/8)
  1217. +#define SS_DATA_LEN (160/8)
  1218. +
  1219. +struct prng_context {
  1220. + u32 seed[SS_SEED_LEN/4];
  1221. + unsigned int slen;
  1222. +};
  1223. +
  1224. +int sunxi_hash_init(struct ahash_request *areq);
  1225. +int sunxi_hash_update(struct ahash_request *areq);
  1226. +int sunxi_hash_final(struct ahash_request *areq);
  1227. +int sunxi_hash_finup(struct ahash_request *areq);
  1228. +int sunxi_hash_digest(struct ahash_request *areq);
  1229. +
  1230. +int sunxi_ss_aes_poll(struct ablkcipher_request *areq);
  1231. +int sunxi_ss_des_poll(struct ablkcipher_request *areq);
  1232. +int sunxi_ss_cipher_init(struct crypto_tfm *tfm);
  1233. +int sunxi_ss_cipher_encrypt(struct ablkcipher_request *areq);
  1234. +int sunxi_ss_cipher_decrypt(struct ablkcipher_request *areq);
  1235. +int sunxi_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  1236. + unsigned int keylen);
  1237. +int sunxi_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  1238. + unsigned int keylen);
  1239. +int sunxi_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  1240. + unsigned int keylen);
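
Once this driver is loaded and its algorithms are registered, they can be exercised from userspace through the kernel's AF_ALG socket interface, like any other provider of "md5", "sha1" or "cbc(aes)"; whether the sunxi-ss implementation is actually selected depends on its cra_priority relative to the other registered providers. The sketch below is not part of the patch: it hashes a short message with whatever "md5" implementation the kernel picks, with error handling kept minimal.

/* Minimal AF_ALG example: compute an MD5 digest via the kernel crypto API. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "md5",
	};
	unsigned char digest[16];
	const char msg[] = "hello";
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;

	opfd = accept(tfmfd, NULL, 0);		/* one operation instance */
	if (opfd < 0)
		return 1;

	if (write(opfd, msg, strlen(msg)) < 0 ||	/* feed the message */
	    read(opfd, digest, sizeof(digest)) < 0)	/* read the digest */
		return 1;

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}

The cipher path can be exercised the same way by using salg_type "skcipher" with salg_name "cbc(aes)", setting the key with setsockopt(ALG_SET_KEY) and passing the IV and operation direction as ancillary data with sendmsg().
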