12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533 |
- /* armv8-sha256.c
- *
- * Copyright (C) 2006-2020 wolfSSL Inc.
- *
- * This file is part of wolfSSL.
- *
- * wolfSSL is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * wolfSSL is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
- */
- #ifdef HAVE_CONFIG_H
- #include <config.h>
- #endif
- #include <wolfssl/wolfcrypt/settings.h>
- #ifdef WOLFSSL_ARMASM
- #if !defined(NO_SHA256) || defined(WOLFSSL_SHA224)
- #ifdef HAVE_FIPS
- #undef HAVE_FIPS
- #endif
- #include <wolfssl/wolfcrypt/sha256.h>
- #include <wolfssl/wolfcrypt/logging.h>
- #include <wolfssl/wolfcrypt/error-crypt.h>
- #ifdef NO_INLINE
- #include <wolfssl/wolfcrypt/misc.h>
- #else
- #define WOLFSSL_MISC_INCLUDED
- #include <wolfcrypt/src/misc.c>
- #endif
/* SHA-256 round constants K[0..63] (FIPS 180-4, sec. 4.2.2).
 * ALIGN32 keeps the table suitably aligned for the vector loads
 * (LD1 / VLDM) performed in the assembly blocks below. */
static const ALIGN32 word32 K[64] = {
    0x428A2F98L, 0x71374491L, 0xB5C0FBCFL, 0xE9B5DBA5L, 0x3956C25BL,
    0x59F111F1L, 0x923F82A4L, 0xAB1C5ED5L, 0xD807AA98L, 0x12835B01L,
    0x243185BEL, 0x550C7DC3L, 0x72BE5D74L, 0x80DEB1FEL, 0x9BDC06A7L,
    0xC19BF174L, 0xE49B69C1L, 0xEFBE4786L, 0x0FC19DC6L, 0x240CA1CCL,
    0x2DE92C6FL, 0x4A7484AAL, 0x5CB0A9DCL, 0x76F988DAL, 0x983E5152L,
    0xA831C66DL, 0xB00327C8L, 0xBF597FC7L, 0xC6E00BF3L, 0xD5A79147L,
    0x06CA6351L, 0x14292967L, 0x27B70A85L, 0x2E1B2138L, 0x4D2C6DFCL,
    0x53380D13L, 0x650A7354L, 0x766A0ABBL, 0x81C2C92EL, 0x92722C85L,
    0xA2BFE8A1L, 0xA81A664BL, 0xC24B8B70L, 0xC76C51A3L, 0xD192E819L,
    0xD6990624L, 0xF40E3585L, 0x106AA070L, 0x19A4C116L, 0x1E376C08L,
    0x2748774CL, 0x34B0BCB5L, 0x391C0CB3L, 0x4ED8AA4AL, 0x5B9CCA4FL,
    0x682E6FF3L, 0x748F82EEL, 0x78A5636FL, 0x84C87814L, 0x8CC70208L,
    0x90BEFFFAL, 0xA4506CEBL, 0xBEF9A3F7L, 0xC67178F2L
};
- static int InitSha256(wc_Sha256* sha256)
- {
- int ret = 0;
- if (sha256 == NULL) {
- return BAD_FUNC_ARG;
- }
- sha256->digest[0] = 0x6A09E667L;
- sha256->digest[1] = 0xBB67AE85L;
- sha256->digest[2] = 0x3C6EF372L;
- sha256->digest[3] = 0xA54FF53AL;
- sha256->digest[4] = 0x510E527FL;
- sha256->digest[5] = 0x9B05688CL;
- sha256->digest[6] = 0x1F83D9ABL;
- sha256->digest[7] = 0x5BE0CD19L;
- sha256->buffLen = 0;
- sha256->loLen = 0;
- sha256->hiLen = 0;
- return ret;
- }
- static WC_INLINE void AddLength(wc_Sha256* sha256, word32 len)
- {
- word32 tmp = sha256->loLen;
- if ((sha256->loLen += len) < tmp)
- sha256->hiLen++; /* carry low to high */
- }
- #ifdef __aarch64__
- /* ARMv8 hardware acceleration */
/* Sha256Update() for AArch64 using the ARMv8 Cryptography Extension
 * instructions (SHA256H/SHA256H2/SHA256SU0/SHA256SU1).
 *
 * Buffers partial input in sha256->buffer; once at least one full
 * 64-byte block is available, compresses all complete blocks in one
 * asm pass (the first block comes from the buffer, the remaining
 * blocks are streamed directly from 'data'), then stashes any
 * leftover bytes back into the buffer.
 *
 * sha256 - hash state (digest/buffer/lengths); assumed non-NULL —
 *          callers are expected to have validated it (TODO confirm
 *          against the wrapper in this file's unseen remainder).
 * data   - input bytes to hash
 * len    - number of input bytes
 * Returns 0.
 */
static WC_INLINE int Sha256Update(wc_Sha256* sha256, const byte* data, word32 len)
{
    word32 add;
    word32 numBlocks;

    /* only perform actions if a buffer is passed in */
    if (len > 0) {
        /* fill leftover buffer with data */
        add = min(len, WC_SHA256_BLOCK_SIZE - sha256->buffLen);
        XMEMCPY((byte*)(sha256->buffer) + sha256->buffLen, data, add);
        sha256->buffLen += add;
        data += add;
        len -= add;

        /* number of blocks in a row to complete */
        numBlocks = (len + sha256->buffLen)/WC_SHA256_BLOCK_SIZE;
        if (numBlocks > 0) {
            /* non-const pointer to K so it can be tied to the asm's
             * in/out register operand (the LD1s post-increment it) */
            word32* k = (word32*)K;
            /* get leftover amount after blocks */
            add = (len + sha256->buffLen) - numBlocks * WC_SHA256_BLOCK_SIZE;
            __asm__ volatile (
            "#load leftover data\n"
            "LD1 {v0.2d-v3.2d}, %[buffer] \n"
            "#load current digest\n"
            "LD1 {v12.2d-v13.2d}, %[digest] \n"
            "MOV w8, %w[blocks] \n"
            /* REV32: message words are big-endian in the stream */
            "REV32 v0.16b, v0.16b \n"
            "REV32 v1.16b, v1.16b \n"
            "REV32 v2.16b, v2.16b \n"
            "REV32 v3.16b, v3.16b \n"
            "#load K values in \n"
            "LD1 {v16.4s-v19.4s}, [%[k]], #64 \n"
            "LD1 {v20.4s-v23.4s}, [%[k]], #64 \n"
            "MOV v14.16b, v12.16b \n" /* store digest for add at the end */
            "MOV v15.16b, v13.16b \n"
            "LD1 {v24.4s-v27.4s}, [%[k]], #64 \n"
            "LD1 {v28.4s-v31.4s}, [%[k]], #64 \n"
            /* beginning of SHA256 block operation; each "Round" below
             * covers 4 of the 64 SHA-256 rounds (one 4-word K vector) */
            "1:\n"
            /* Round 1 */
            "MOV v4.16b, v0.16b \n"
            "ADD v0.4s, v0.4s, v16.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 2 */
            "SHA256SU0 v4.4s, v1.4s \n"
            "ADD v0.4s, v1.4s, v17.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v4.4s, v2.4s, v3.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 3 */
            "SHA256SU0 v1.4s, v2.4s \n"
            "ADD v0.4s, v2.4s, v18.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v1.4s, v3.4s, v4.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 4 */
            "SHA256SU0 v2.4s, v3.4s \n"
            "ADD v0.4s, v3.4s, v19.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v2.4s, v4.4s, v1.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 5 */
            "SHA256SU0 v3.4s, v4.4s \n"
            "ADD v0.4s, v4.4s, v20.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v3.4s, v1.4s, v2.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 6 */
            "SHA256SU0 v4.4s, v1.4s \n"
            "ADD v0.4s, v1.4s, v21.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v4.4s, v2.4s, v3.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 7 */
            "SHA256SU0 v1.4s, v2.4s \n"
            "ADD v0.4s, v2.4s, v22.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v1.4s, v3.4s, v4.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 8 */
            "SHA256SU0 v2.4s, v3.4s \n"
            "ADD v0.4s, v3.4s, v23.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v2.4s, v4.4s, v1.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 9 */
            "SHA256SU0 v3.4s, v4.4s \n"
            "ADD v0.4s, v4.4s, v24.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v3.4s, v1.4s, v2.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 10 */
            "SHA256SU0 v4.4s, v1.4s \n"
            "ADD v0.4s, v1.4s, v25.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v4.4s, v2.4s, v3.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 11 */
            "SHA256SU0 v1.4s, v2.4s \n"
            "ADD v0.4s, v2.4s, v26.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v1.4s, v3.4s, v4.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 12 */
            "SHA256SU0 v2.4s, v3.4s \n"
            "ADD v0.4s, v3.4s, v27.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v2.4s, v4.4s, v1.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 13 */
            "SHA256SU0 v3.4s, v4.4s \n"
            "ADD v0.4s, v4.4s, v28.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256SU1 v3.4s, v1.4s, v2.4s \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 14: last 3 groups need no more schedule expansion */
            "ADD v0.4s, v1.4s, v29.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 15 */
            "ADD v0.4s, v2.4s, v30.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            /* Round 16 */
            "ADD v0.4s, v3.4s, v31.4s \n"
            "MOV v11.16b, v12.16b \n"
            "SHA256H q12, q13, v0.4s \n"
            "SHA256H2 q13, q11, v0.4s \n"
            "#Add working vars back into digest state \n"
            "SUB w8, w8, #1 \n"
            "ADD v12.4s, v12.4s, v14.4s \n"
            "ADD v13.4s, v13.4s, v15.4s \n"
            "#check if more blocks should be done\n"
            "CBZ w8, 2f \n"
            "#load in message and schedule updates \n"
            "LD1 {v0.2d-v3.2d}, [%[dataIn]], #64 \n"
            "MOV v14.16b, v12.16b \n"
            "MOV v15.16b, v13.16b \n"
            "REV32 v0.16b, v0.16b \n"
            "REV32 v1.16b, v1.16b \n"
            "REV32 v2.16b, v2.16b \n"
            "REV32 v3.16b, v3.16b \n"
            "B 1b \n" /* do another block */
            "2:\n"
            "STP q12, q13, %[out] \n"
            : [out] "=m" (sha256->digest), "=m" (sha256->buffer), "=r" (numBlocks),
              "=r" (data), "=r" (k)
            : [k] "4" (k), [digest] "m" (sha256->digest), [buffer] "m" (sha256->buffer),
              [blocks] "2" (numBlocks), [dataIn] "3" (data)
            : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
              "v8", "v9", "v10", "v11", "v12", "v13", "v14",
              "v15", "v16", "v17", "v18", "v19", "v20", "v21",
              "v22", "v23", "v24", "v25", "v26", "v27", "v28",
              "v29", "v30", "v31", "w8"
            );

            AddLength(sha256, WC_SHA256_BLOCK_SIZE * numBlocks);
            /* copy over any remaining data leftover; 'data' was advanced
             * past the streamed blocks by the asm's post-increment loads */
            XMEMCPY(sha256->buffer, data, add);
            sha256->buffLen = add;
        }
    }

    /* account for possibility of not used if len = 0 */
    (void)add;
    (void)numBlocks;

    return 0;
}
- static WC_INLINE int Sha256Final(wc_Sha256* sha256, byte* hash)
- {
- byte* local;
- local = (byte*)sha256->buffer;
- AddLength(sha256, sha256->buffLen); /* before adding pads */
- local[sha256->buffLen++] = 0x80; /* add 1 */
- /* pad with zeros */
- if (sha256->buffLen > WC_SHA256_PAD_SIZE) {
- XMEMSET(&local[sha256->buffLen], 0, WC_SHA256_BLOCK_SIZE - sha256->buffLen);
- sha256->buffLen += WC_SHA256_BLOCK_SIZE - sha256->buffLen;
- __asm__ volatile (
- "LD1 {v4.2d-v7.2d}, %[buffer] \n"
- "MOV v0.16b, v4.16b \n"
- "MOV v1.16b, v5.16b \n"
- "REV32 v0.16b, v0.16b \n"
- "REV32 v1.16b, v1.16b \n"
- "MOV v2.16b, v6.16b \n"
- "MOV v3.16b, v7.16b \n"
- "REV32 v2.16b, v2.16b \n"
- "REV32 v3.16b, v3.16b \n"
- "MOV v4.16b, v0.16b \n"
- "MOV v5.16b, v1.16b \n"
- "LD1 {v20.2d-v21.2d}, %[digest] \n"
- "#SHA256 operation on updated message \n"
- "MOV v16.16b, v20.16b \n"
- "MOV v17.16b, v21.16b \n"
- "LD1 {v22.16b-v25.16b}, [%[k]], #64 \n"
- "SHA256SU0 v4.4s, v1.4s \n"
- "ADD v0.4s, v0.4s, v22.4s \n"
- "MOV v6.16b, v2.16b \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256SU1 v4.4s, v2.4s, v3.4s \n"
- "SHA256H q16, q17, v0.4s \n"
- "SHA256H2 q17, q18, v0.4s \n"
- "SHA256SU0 v5.4s, v2.4s \n"
- "ADD v1.4s, v1.4s, v23.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v7.16b, v3.16b \n"
- "SHA256SU1 v5.4s, v3.4s, v4.4s \n"
- "SHA256H q16, q17, v1.4s \n"
- "SHA256H2 q17, q18, v1.4s \n"
- "SHA256SU0 v6.4s, v3.4s \n"
- "ADD v2.4s, v2.4s, v24.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v8.16b, v4.16b \n"
- "SHA256SU1 v6.4s, v4.4s, v5.4s \n"
- "SHA256H q16, q17, v2.4s \n"
- "SHA256H2 q17, q18, v2.4s \n"
- "SHA256SU0 v7.4s, v4.4s \n"
- "ADD v3.4s, v3.4s, v25.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v9.16b, v5.16b \n"
- "SHA256SU1 v7.4s, v5.4s, v6.4s \n"
- "SHA256H q16, q17, v3.4s \n"
- "SHA256H2 q17, q18, v3.4s \n"
- "LD1 {v22.16b-v25.16b}, [%[k]], #64 \n"
- "SHA256SU0 v8.4s, v5.4s \n"
- "ADD v4.4s, v4.4s, v22.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v10.16b, v6.16b \n"
- "SHA256SU1 v8.4s, v6.4s, v7.4s \n"
- "SHA256H q16, q17, v4.4s \n"
- "SHA256H2 q17, q18, v4.4s \n"
- "SHA256SU0 v9.4s, v6.4s \n"
- "ADD v5.4s, v5.4s, v23.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v11.16b, v7.16b \n"
- "SHA256SU1 v9.4s, v7.4s, v8.4s \n"
- "SHA256H q16, q17, v5.4s \n"
- "SHA256H2 q17, q18, v5.4s \n"
- "SHA256SU0 v10.4s, v7.4s \n"
- "ADD v6.4s, v6.4s, v24.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v12.16b, v8.16b \n"
- "SHA256SU1 v10.4s, v8.4s, v9.4s \n"
- "SHA256H q16, q17, v6.4s \n"
- "SHA256H2 q17, q18, v6.4s \n"
- "SHA256SU0 v11.4s, v8.4s \n"
- "ADD v7.4s, v7.4s, v25.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v13.16b, v9.16b \n"
- "SHA256SU1 v11.4s, v9.4s, v10.4s \n"
- "SHA256H q16, q17, v7.4s \n"
- "SHA256H2 q17, q18, v7.4s \n"
- "LD1 {v22.16b-v25.16b}, [%[k]], #64 \n"
- "SHA256SU0 v12.4s, v9.4s \n"
- "ADD v8.4s, v8.4s, v22.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v14.16b, v10.16b \n"
- "SHA256SU1 v12.4s, v10.4s, v11.4s \n"
- "SHA256H q16, q17, v8.4s \n"
- "SHA256H2 q17, q18, v8.4s \n"
- "SHA256SU0 v13.4s, v10.4s \n"
- "ADD v9.4s, v9.4s, v23.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v15.16b, v11.16b \n"
- "SHA256SU1 v13.4s, v11.4s, v12.4s \n"
- "SHA256H q16, q17, v9.4s \n"
- "SHA256H2 q17, q18, v9.4s \n"
- "SHA256SU0 v14.4s, v11.4s \n"
- "ADD v10.4s, v10.4s, v24.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256SU1 v14.4s, v12.4s, v13.4s \n"
- "SHA256H q16, q17, v10.4s \n"
- "SHA256H2 q17, q18, v10.4s \n"
- "SHA256SU0 v15.4s, v12.4s \n"
- "ADD v11.4s, v11.4s, v25.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256SU1 v15.4s, v13.4s, v14.4s \n"
- "SHA256H q16, q17, v11.4s \n"
- "SHA256H2 q17, q18, v11.4s \n"
- "LD1 {v22.16b-v25.16b}, [%[k]] \n"
- "ADD v12.4s, v12.4s, v22.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256H q16, q17, v12.4s \n"
- "SHA256H2 q17, q18, v12.4s \n"
- "ADD v13.4s, v13.4s, v23.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256H q16, q17, v13.4s \n"
- "SHA256H2 q17, q18, v13.4s \n"
- "ADD v14.4s, v14.4s, v24.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256H q16, q17, v14.4s \n"
- "SHA256H2 q17, q18, v14.4s \n"
- "ADD v15.4s, v15.4s, v25.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256H q16, q17, v15.4s \n"
- "SHA256H2 q17, q18, v15.4s \n"
- "#Add working vars back into digest state \n"
- "ADD v16.4s, v16.4s, v20.4s \n"
- "ADD v17.4s, v17.4s, v21.4s \n"
- "STP q16, q17, %[out] \n"
- : [out] "=m" (sha256->digest)
- : [k] "r" (K), [digest] "m" (sha256->digest),
- [buffer] "m" (sha256->buffer)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11"
- , "v12", "v13", "v14", "v15", "v16", "v17", "v18"
- , "v19", "v20", "v21", "v22", "v23", "v24", "v25"
- );
- sha256->buffLen = 0;
- }
- XMEMSET(&local[sha256->buffLen], 0, WC_SHA256_PAD_SIZE - sha256->buffLen);
- /* put lengths in bits */
- sha256->hiLen = (sha256->loLen >> (8*sizeof(sha256->loLen) - 3)) +
- (sha256->hiLen << 3);
- sha256->loLen = sha256->loLen << 3;
- /* store lengths */
- #if defined(LITTLE_ENDIAN_ORDER)
- __asm__ volatile (
- "LD1 {v0.2d-v3.2d}, %[in] \n"
- "REV32 v0.16b, v0.16b \n"
- "REV32 v1.16b, v1.16b \n"
- "REV32 v2.16b, v2.16b \n"
- "REV32 v3.16b, v3.16b \n"
- "ST1 {v0.2d-v3.2d}, %[out] \n"
- : [out] "=m" (sha256->buffer)
- : [in] "m" (sha256->buffer)
- : "cc", "memory", "v0", "v1", "v2", "v3"
- );
- #endif
- /* ! length ordering dependent on digest endian type ! */
- XMEMCPY(&local[WC_SHA256_PAD_SIZE], &sha256->hiLen, sizeof(word32));
- XMEMCPY(&local[WC_SHA256_PAD_SIZE + sizeof(word32)], &sha256->loLen,
- sizeof(word32));
- __asm__ volatile (
- "#load in message and schedule updates \n"
- "LD1 {v4.2d-v7.2d}, %[buffer] \n"
- "MOV v0.16b, v4.16b \n"
- "MOV v1.16b, v5.16b \n"
- "MOV v2.16b, v6.16b \n"
- "MOV v3.16b, v7.16b \n"
- "LD1 {v20.2d-v21.2d}, %[digest] \n"
- "MOV v16.16b, v20.16b \n"
- "MOV v17.16b, v21.16b \n"
- "LD1 {v22.16b-v25.16b}, [%[k]], #64 \n"
- "SHA256SU0 v4.4s, v1.4s \n"
- "ADD v0.4s, v0.4s, v22.4s \n"
- "MOV v6.16b, v2.16b \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256SU1 v4.4s, v2.4s, v3.4s \n"
- "SHA256H q16, q17, v0.4s \n"
- "SHA256H2 q17, q18, v0.4s \n"
- "SHA256SU0 v5.4s, v2.4s \n"
- "ADD v1.4s, v1.4s, v23.4s \n"
- "MOV v7.16b, v3.16b \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256SU1 v5.4s, v3.4s, v4.4s \n"
- "SHA256H q16, q17, v1.4s \n"
- "SHA256H2 q17, q18, v1.4s \n"
- "SHA256SU0 v6.4s, v3.4s \n"
- "ADD v2.4s, v2.4s, v24.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v8.16b, v4.16b \n"
- "SHA256SU1 v6.4s, v4.4s, v5.4s \n"
- "SHA256H q16, q17, v2.4s \n"
- "SHA256H2 q17, q18, v2.4s \n"
- "SHA256SU0 v7.4s, v4.4s \n"
- "ADD v3.4s, v3.4s, v25.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v9.16b, v5.16b \n"
- "SHA256SU1 v7.4s, v5.4s, v6.4s \n"
- "SHA256H q16, q17, v3.4s \n"
- "SHA256H2 q17, q18, v3.4s \n"
- "LD1 {v22.16b-v25.16b}, [%[k]], #64 \n"
- "SHA256SU0 v8.4s, v5.4s \n"
- "ADD v4.4s, v4.4s, v22.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v10.16b, v6.16b \n"
- "SHA256SU1 v8.4s, v6.4s, v7.4s \n"
- "SHA256H q16, q17, v4.4s \n"
- "SHA256H2 q17, q18, v4.4s \n"
- "SHA256SU0 v9.4s, v6.4s \n"
- "ADD v5.4s, v5.4s, v23.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v11.16b, v7.16b \n"
- "SHA256SU1 v9.4s, v7.4s, v8.4s \n"
- "SHA256H q16, q17, v5.4s \n"
- "SHA256H2 q17, q18, v5.4s \n"
- "SHA256SU0 v10.4s, v7.4s \n"
- "ADD v6.4s, v6.4s, v24.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v12.16b, v8.16b \n"
- "SHA256SU1 v10.4s, v8.4s, v9.4s \n"
- "SHA256H q16, q17, v6.4s \n"
- "SHA256H2 q17, q18, v6.4s \n"
- "SHA256SU0 v11.4s, v8.4s \n"
- "ADD v7.4s, v7.4s, v25.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v13.16b, v9.16b \n"
- "SHA256SU1 v11.4s, v9.4s, v10.4s \n"
- "SHA256H q16, q17, v7.4s \n"
- "SHA256H2 q17, q18, v7.4s \n"
- "LD1 {v22.16b-v25.16b}, [%[k]], #64 \n"
- "SHA256SU0 v12.4s, v9.4s \n"
- "ADD v8.4s, v8.4s, v22.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v14.16b, v10.16b \n"
- "SHA256SU1 v12.4s, v10.4s, v11.4s \n"
- "SHA256H q16, q17, v8.4s \n"
- "SHA256H2 q17, q18, v8.4s \n"
- "SHA256SU0 v13.4s, v10.4s \n"
- "ADD v9.4s, v9.4s, v23.4s \n"
- "MOV v18.16b, v16.16b \n"
- "MOV v15.16b, v11.16b \n"
- "SHA256SU1 v13.4s, v11.4s, v12.4s \n"
- "SHA256H q16, q17, v9.4s \n"
- "SHA256H2 q17, q18, v9.4s \n"
- "SHA256SU0 v14.4s, v11.4s \n"
- "ADD v10.4s, v10.4s, v24.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256SU1 v14.4s, v12.4s, v13.4s \n"
- "SHA256H q16, q17, v10.4s \n"
- "SHA256H2 q17, q18, v10.4s \n"
- "SHA256SU0 v15.4s, v12.4s \n"
- "ADD v11.4s, v11.4s, v25.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256SU1 v15.4s, v13.4s, v14.4s \n"
- "SHA256H q16, q17, v11.4s \n"
- "SHA256H2 q17, q18, v11.4s \n"
- "LD1 {v22.16b-v25.16b}, [%[k]] \n"
- "ADD v12.4s, v12.4s, v22.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256H q16, q17, v12.4s \n"
- "SHA256H2 q17, q18, v12.4s \n"
- "ADD v13.4s, v13.4s, v23.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256H q16, q17, v13.4s \n"
- "SHA256H2 q17, q18, v13.4s \n"
- "ADD v14.4s, v14.4s, v24.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256H q16, q17, v14.4s \n"
- "SHA256H2 q17, q18, v14.4s \n"
- "ADD v15.4s, v15.4s, v25.4s \n"
- "MOV v18.16b, v16.16b \n"
- "SHA256H q16, q17, v15.4s \n"
- "SHA256H2 q17, q18, v15.4s \n"
- "#Add working vars back into digest state \n"
- "ADD v16.4s, v16.4s, v20.4s \n"
- "ADD v17.4s, v17.4s, v21.4s \n"
- "#Store value as hash output \n"
- #if defined(LITTLE_ENDIAN_ORDER)
- "REV32 v16.16b, v16.16b \n"
- #endif
- "ST1 {v16.16b}, [%[hashOut]], #16 \n"
- #if defined(LITTLE_ENDIAN_ORDER)
- "REV32 v17.16b, v17.16b \n"
- #endif
- "ST1 {v17.16b}, [%[hashOut]] \n"
- : [hashOut] "=r" (hash)
- : [k] "r" (K), [digest] "m" (sha256->digest),
- [buffer] "m" (sha256->buffer),
- "0" (hash)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
- "v8", "v9", "v10", "v11", "v12", "v13", "v14",
- "v15", "v16", "v17", "v18", "v19", "v20", "v21",
- "v22", "v23", "v24", "v25"
- );
- return 0;
- }
- #else /* not using 64 bit */
- /* ARMv8 hardware acceleration Aarch32 */
/* Sha256Update() for AArch32 using the ARMv8 Cryptography Extension
 * (SHA256H/SHA256H2/SHA256SU0/SHA256SU1 NEON instructions).
 *
 * Same structure as the AArch64 path: buffer partial input, compress all
 * complete 64-byte blocks in one asm pass (first block from the buffer,
 * the rest streamed from 'data'), keep any leftover in the buffer.
 *
 * NOTE(review): %[k] is declared as a plain input operand but the
 * VLDM!/VLD1! loads post-increment it, and it is left advanced when the
 * loop exits at label 2 — it should be an in/out operand like in the
 * AArch64 version. Confirm against upstream before relying on this
 * under aggressive optimization.
 *
 * Returns 0.
 */
static WC_INLINE int Sha256Update(wc_Sha256* sha256, const byte* data, word32 len)
{
    word32 add;
    word32 numBlocks;

    /* only perform actions if a buffer is passed in */
    if (len > 0) {
        /* fill leftover buffer with data */
        add = min(len, WC_SHA256_BLOCK_SIZE - sha256->buffLen);
        XMEMCPY((byte*)(sha256->buffer) + sha256->buffLen, data, add);
        sha256->buffLen += add;
        data += add;
        len -= add;

        /* number of blocks in a row to complete */
        numBlocks = (len + sha256->buffLen)/WC_SHA256_BLOCK_SIZE;
        if (numBlocks > 0) {
            word32* bufPt = sha256->buffer;
            word32* digPt = sha256->digest;
            /* get leftover amount after blocks */
            add = (len + sha256->buffLen) - numBlocks * WC_SHA256_BLOCK_SIZE;
            __asm__ volatile (
            "#load leftover data\n"
            "VLDM %[buffer]!, {q0-q3} \n"
            "#load current digest\n"
            "VLDM %[digest], {q12-q13} \n"
            "MOV r8, %[blocks] \n"
            /* byte-swap: message words are big-endian in the stream */
            "VREV32.8 q0, q0 \n"
            "VREV32.8 q1, q1 \n"
            "VREV32.8 q2, q2 \n"
            "VREV32.8 q3, q3 \n"
            /* preload K[0..19]; the rest is streamed per round since
             * only q5-q10 are available for constants here */
            "VLDM %[k]! ,{q5-q8} \n"
            "VLDM %[k]! ,{q9}\n"
            "VMOV.32 q14, q12 \n" /* store digest for add at the end */
            "VMOV.32 q15, q13 \n"
            /* beginning of SHA256 block operation; each "Round" below
             * covers 4 of the 64 SHA-256 rounds (one 4-word K vector) */
            "1:\n"
            /* Round 1 */
            "VMOV.32 q4, q0 \n"
            "VADD.i32 q0, q0, q5 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 2 */
            "SHA256SU0.32 q4, q1 \n"
            "VADD.i32 q0, q1, q6 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q4, q2, q3 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 3 */
            "SHA256SU0.32 q1, q2 \n"
            "VADD.i32 q0, q2, q7 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q1, q3, q4 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 4 */
            "SHA256SU0.32 q2, q3 \n"
            "VADD.i32 q0, q3, q8 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q2, q4, q1 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 5 */
            "SHA256SU0.32 q3, q4 \n"
            "VADD.i32 q0, q4, q9 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q3, q1, q2 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 6 */
            "VLD1.32 {q10}, [%[k]]! \n"
            "SHA256SU0.32 q4, q1 \n"
            "VADD.i32 q0, q1, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q4, q2, q3 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 7 */
            "VLD1.32 {q10}, [%[k]]! \n"
            "SHA256SU0.32 q1, q2 \n"
            "VADD.i32 q0, q2, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q1, q3, q4 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 8 */
            "VLD1.32 {q10}, [%[k]]! \n"
            "SHA256SU0.32 q2, q3 \n"
            "VADD.i32 q0, q3, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q2, q4, q1 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 9 */
            "VLD1.32 {q10}, [%[k]]! \n"
            "SHA256SU0.32 q3, q4 \n"
            "VADD.i32 q0, q4, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q3, q1, q2 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 10 */
            "VLD1.32 {q10}, [%[k]]! \n"
            "SHA256SU0.32 q4, q1 \n"
            "VADD.i32 q0, q1, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q4, q2, q3 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 11 */
            "VLD1.32 {q10}, [%[k]]! \n"
            "SHA256SU0.32 q1, q2 \n"
            "VADD.i32 q0, q2, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q1, q3, q4 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 12 */
            "VLD1.32 {q10}, [%[k]]! \n"
            "SHA256SU0.32 q2, q3 \n"
            "VADD.i32 q0, q3, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q2, q4, q1 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 13 */
            "VLD1.32 {q10}, [%[k]]! \n"
            "SHA256SU0.32 q3, q4 \n"
            "VADD.i32 q0, q4, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256SU1.32 q3, q1, q2 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 14: last 3 groups need no more schedule expansion */
            "VLD1.32 {q10}, [%[k]]! \n"
            "VADD.i32 q0, q1, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 15 */
            "VLD1.32 {q10}, [%[k]]! \n"
            "VADD.i32 q0, q2, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            /* Round 16 */
            "VLD1.32 {q10}, [%[k]] \n"
            "SUB r8, r8, #1 \n"
            "VADD.i32 q0, q3, q10 \n"
            "VMOV.32 q11, q12 \n"
            "SHA256H.32 q12, q13, q0 \n"
            "SHA256H2.32 q13, q11, q0 \n"
            "#Add working vars back into digest state \n"
            "VADD.i32 q12, q12, q14 \n"
            "VADD.i32 q13, q13, q15 \n"
            "#check if more blocks should be done\n"
            "CMP r8, #0 \n"
            "BEQ 2f \n"
            "#load in message and schedule updates \n"
            "VLD1.32 {q0}, [%[dataIn]]! \n"
            "VLD1.32 {q1}, [%[dataIn]]! \n"
            "VLD1.32 {q2}, [%[dataIn]]! \n"
            "VLD1.32 {q3}, [%[dataIn]]! \n"
            /* reset K pointer: rewind the 160 bytes consumed by rounds
             * 6-15 so %[k] again points at K[20] for the loop body */
            "SUB %[k], %[k], #160 \n"
            "VREV32.8 q0, q0 \n"
            "VREV32.8 q1, q1 \n"
            "VREV32.8 q2, q2 \n"
            "VREV32.8 q3, q3 \n"
            "VMOV.32 q14, q12 \n"
            "VMOV.32 q15, q13 \n"
            "B 1b \n" /* do another block */
            "2:\n"
            "VST1.32 {q12, q13}, [%[out]] \n"
            : [out] "=r" (digPt), "=r" (bufPt), "=r" (numBlocks),
              "=r" (data)
            : [k] "r" (K), [digest] "0" (digPt), [buffer] "1" (bufPt),
              [blocks] "2" (numBlocks), [dataIn] "3" (data)
            : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
              "q8", "q9", "q10", "q11", "q12", "q13", "q14",
              "q15", "r8"
            );

            AddLength(sha256, WC_SHA256_BLOCK_SIZE * numBlocks);
            /* copy over any remaining data leftover; 'data' was advanced
             * past the streamed blocks by the asm's post-increment loads */
            XMEMCPY(sha256->buffer, data, add);
            sha256->buffLen = add;
        }
    }

    /* account for possibility of not used if len = 0 */
    (void)add;
    (void)numBlocks;

    return 0;
}
- static WC_INLINE int Sha256Final(wc_Sha256* sha256, byte* hash)
- {
- byte* local;
- if (sha256 == NULL || hash == NULL) {
- return BAD_FUNC_ARG;
- }
- local = (byte*)sha256->buffer;
- AddLength(sha256, sha256->buffLen); /* before adding pads */
- local[sha256->buffLen++] = 0x80; /* add 1 */
- /* pad with zeros */
- if (sha256->buffLen > WC_SHA256_PAD_SIZE) {
- word32* bufPt = sha256->buffer;
- word32* digPt = sha256->digest;
- XMEMSET(&local[sha256->buffLen], 0, WC_SHA256_BLOCK_SIZE - sha256->buffLen);
- sha256->buffLen += WC_SHA256_BLOCK_SIZE - sha256->buffLen;
- __asm__ volatile (
- "#load leftover data\n"
- "VLDM %[buffer]!, {q0-q3} \n"
- "#load current digest\n"
- "VLDM %[digest], {q12-q13} \n"
- "VREV32.8 q0, q0 \n"
- "VREV32.8 q1, q1 \n"
- "VREV32.8 q2, q2 \n"
- "VREV32.8 q3, q3 \n"
- "#load K values in \n"
- "VMOV.32 q14, q12 \n" /* store digest for add at the end */
- "VMOV.32 q15, q13 \n"
- /* beginning of SHA256 block operation */
- /* Round 1 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "VMOV.32 q4, q0 \n"
- "VADD.i32 q0, q0, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 2 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q4, q1 \n"
- "VADD.i32 q0, q1, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q4, q2, q3 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 3 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q1, q2 \n"
- "VADD.i32 q0, q2, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q1, q3, q4 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 4 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q2, q3 \n"
- "VADD.i32 q0, q3, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q2, q4, q1 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 5 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q3, q4 \n"
- "VADD.i32 q0, q4, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q3, q1, q2 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 6 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q4, q1 \n"
- "VADD.i32 q0, q1, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q4, q2, q3 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 7 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q1, q2 \n"
- "VADD.i32 q0, q2, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q1, q3, q4 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 8 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q2, q3 \n"
- "VADD.i32 q0, q3, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q2, q4, q1 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 9 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q3, q4 \n"
- "VADD.i32 q0, q4, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q3, q1, q2 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 10 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q4, q1 \n"
- "VADD.i32 q0, q1, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q4, q2, q3 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 11 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q1, q2 \n"
- "VADD.i32 q0, q2, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q1, q3, q4 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 12 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q2, q3 \n"
- "VADD.i32 q0, q3, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q2, q4, q1 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 13 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q3, q4 \n"
- "VADD.i32 q0, q4, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q3, q1, q2 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 14 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "VADD.i32 q0, q1, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 15 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "VADD.i32 q0, q2, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 16 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "VADD.i32 q0, q3, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- "#Add working vars back into digest state \n"
- "VADD.i32 q12, q12, q14 \n"
- "VADD.i32 q13, q13, q15 \n"
- /* reset K pointer */
- "SUB %[k], %[k], #256 \n"
- "VST1.32 {q12, q13}, [%[out]] \n"
- : [out] "=r" (digPt), "=r" (bufPt)
- : [k] "r" (K), [digest] "0" (digPt), [buffer] "1" (bufPt)
- : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
- "q8", "q9", "q10", "q11", "q12", "q13", "q14",
- "q15"
- );
- sha256->buffLen = 0;
- }
- XMEMSET(&local[sha256->buffLen], 0, WC_SHA256_PAD_SIZE - sha256->buffLen);
- /* put lengths in bits */
- sha256->hiLen = (sha256->loLen >> (8*sizeof(sha256->loLen) - 3)) +
- (sha256->hiLen << 3);
- sha256->loLen = sha256->loLen << 3;
- /* store lengths */
- #if defined(LITTLE_ENDIAN_ORDER)
- {
- word32* bufPt = sha256->buffer;
- __asm__ volatile (
- "VLD1.32 {q0}, [%[in]] \n"
- "VREV32.8 q0, q0 \n"
- "VST1.32 {q0}, [%[out]]!\n"
- "VLD1.32 {q1}, [%[in]] \n"
- "VREV32.8 q1, q1 \n"
- "VST1.32 {q1}, [%[out]]!\n"
- "VLD1.32 {q2}, [%[in]] \n"
- "VREV32.8 q2, q2 \n"
- "VST1.32 {q2}, [%[out]]!\n"
- "VLD1.32 {q3}, [%[in]] \n"
- "VREV32.8 q3, q3 \n"
- "VST1.32 {q3}, [%[out]] \n"
- : [out] "=r" (bufPt)
- : [in] "0" (bufPt)
- : "cc", "memory", "q0", "q1", "q2", "q3"
- );
- }
- #endif
- /* ! length ordering dependent on digest endian type ! */
- XMEMCPY(&local[WC_SHA256_PAD_SIZE], &sha256->hiLen, sizeof(word32));
- XMEMCPY(&local[WC_SHA256_PAD_SIZE + sizeof(word32)], &sha256->loLen,
- sizeof(word32));
- word32* bufPt = sha256->buffer;
- word32* digPt = sha256->digest;
- __asm__ volatile (
- "#load leftover data\n"
- "VLDM %[buffer]!, {q0-q3} \n"
- "#load current digest\n"
- "VLDM %[digest], {q12-q13} \n"
- "VMOV.32 q14, q12 \n" /* store digest for add at the end */
- "VMOV.32 q15, q13 \n"
- /* beginning of SHA256 block operation */
- /* Round 1 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "VMOV.32 q4, q0 \n"
- "VADD.i32 q0, q0, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 2 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q4, q1 \n"
- "VADD.i32 q0, q1, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q4, q2, q3 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 3 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q1, q2 \n"
- "VADD.i32 q0, q2, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q1, q3, q4 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 4 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q2, q3 \n"
- "VADD.i32 q0, q3, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q2, q4, q1 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 5 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q3, q4 \n"
- "VADD.i32 q0, q4, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q3, q1, q2 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 6 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q4, q1 \n"
- "VADD.i32 q0, q1, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q4, q2, q3 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 7 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q1, q2 \n"
- "VADD.i32 q0, q2, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q1, q3, q4 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 8 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q2, q3 \n"
- "VADD.i32 q0, q3, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q2, q4, q1 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 9 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q3, q4 \n"
- "VADD.i32 q0, q4, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q3, q1, q2 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 10 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q4, q1 \n"
- "VADD.i32 q0, q1, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q4, q2, q3 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 11 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q1, q2 \n"
- "VADD.i32 q0, q2, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q1, q3, q4 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 12 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q2, q3 \n"
- "VADD.i32 q0, q3, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q2, q4, q1 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 13 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "SHA256SU0.32 q3, q4 \n"
- "VADD.i32 q0, q4, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256SU1.32 q3, q1, q2 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 14 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "VADD.i32 q0, q1, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 15 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "VADD.i32 q0, q2, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- /* Round 16 */
- "VLD1.32 {q5}, [%[k]]! \n"
- "VADD.i32 q0, q3, q5 \n"
- "VMOV.32 q11, q12 \n"
- "SHA256H.32 q12, q13, q0 \n"
- "SHA256H2.32 q13, q11, q0 \n"
- "#Add working vars back into digest state \n"
- "VADD.i32 q12, q12, q14 \n"
- "VADD.i32 q13, q13, q15 \n"
- "#Store value as hash output \n"
- #if defined(LITTLE_ENDIAN_ORDER)
- "VREV32.8 q12, q12 \n"
- #endif
- "VST1.32 {q12}, [%[hashOut]]! \n"
- #if defined(LITTLE_ENDIAN_ORDER)
- "VREV32.8 q13, q13 \n"
- #endif
- "VST1.32 {q13}, [%[hashOut]] \n"
- : [out] "=r" (digPt), "=r" (bufPt),
- [hashOut] "=r" (hash)
- : [k] "r" (K), [digest] "0" (digPt), [buffer] "1" (bufPt),
- "2" (hash)
- : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
- "q8", "q9", "q10", "q11", "q12", "q13", "q14",
- "q15"
- );
- return 0;
- }
- #endif /* __aarch64__ */
- #ifndef NO_SHA256
- int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
- {
- if (sha256 == NULL)
- return BAD_FUNC_ARG;
- sha256->heap = heap;
- (void)devId;
- return InitSha256(sha256);
- }
- int wc_InitSha256(wc_Sha256* sha256)
- {
- return wc_InitSha256_ex(sha256, NULL, INVALID_DEVID);
- }
- void wc_Sha256Free(wc_Sha256* sha256)
- {
- (void)sha256;
- }
- int wc_Sha256Update(wc_Sha256* sha256, const byte* data, word32 len)
- {
- if (sha256 == NULL || (data == NULL && len != 0)) {
- return BAD_FUNC_ARG;
- }
- return Sha256Update(sha256, data, len);
- }
- int wc_Sha256FinalRaw(wc_Sha256* sha256, byte* hash)
- {
- #ifdef LITTLE_ENDIAN_ORDER
- word32 digest[WC_SHA256_DIGEST_SIZE / sizeof(word32)];
- #endif
- if (sha256 == NULL || hash == NULL) {
- return BAD_FUNC_ARG;
- }
- #ifdef LITTLE_ENDIAN_ORDER
- ByteReverseWords((word32*)digest, (word32*)sha256->digest,
- WC_SHA256_DIGEST_SIZE);
- XMEMCPY(hash, digest, WC_SHA256_DIGEST_SIZE);
- #else
- XMEMCPY(hash, sha256->digest, WC_SHA256_DIGEST_SIZE);
- #endif
- return 0;
- }
- int wc_Sha256Final(wc_Sha256* sha256, byte* hash)
- {
- int ret;
- if (sha256 == NULL || hash == NULL) {
- return BAD_FUNC_ARG;
- }
- ret = Sha256Final(sha256, hash);
- if (ret != 0)
- return ret;
- return InitSha256(sha256); /* reset state */
- }
- int wc_Sha256GetHash(wc_Sha256* sha256, byte* hash)
- {
- int ret;
- wc_Sha256 tmpSha256;
- if (sha256 == NULL || hash == NULL)
- return BAD_FUNC_ARG;
- ret = wc_Sha256Copy(sha256, &tmpSha256);
- if (ret == 0) {
- ret = wc_Sha256Final(&tmpSha256, hash);
- }
- return ret;
- }
- #if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
- int wc_Sha256SetFlags(wc_Sha256* sha256, word32 flags)
- {
- if (sha256) {
- sha256->flags = flags;
- }
- return 0;
- }
- int wc_Sha256GetFlags(wc_Sha256* sha256, word32* flags)
- {
- if (sha256 && flags) {
- *flags = sha256->flags;
- }
- return 0;
- }
- #endif
- int wc_Sha256Copy(wc_Sha256* src, wc_Sha256* dst)
- {
- int ret = 0;
- if (src == NULL || dst == NULL)
- return BAD_FUNC_ARG;
- XMEMCPY(dst, src, sizeof(wc_Sha256));
- return ret;
- }
- #endif /* !NO_SHA256 */
- #ifdef WOLFSSL_SHA224
- static int InitSha224(wc_Sha224* sha224)
- {
- int ret = 0;
- if (sha224 == NULL) {
- return BAD_FUNC_ARG;
- }
- sha224->digest[0] = 0xc1059ed8;
- sha224->digest[1] = 0x367cd507;
- sha224->digest[2] = 0x3070dd17;
- sha224->digest[3] = 0xf70e5939;
- sha224->digest[4] = 0xffc00b31;
- sha224->digest[5] = 0x68581511;
- sha224->digest[6] = 0x64f98fa7;
- sha224->digest[7] = 0xbefa4fa4;
- sha224->buffLen = 0;
- sha224->loLen = 0;
- sha224->hiLen = 0;
- return ret;
- }
- int wc_InitSha224_ex(wc_Sha224* sha224, void* heap, int devId)
- {
- if (sha224 == NULL)
- return BAD_FUNC_ARG;
- sha224->heap = heap;
- (void)devId;
- return InitSha224(sha224);
- }
- int wc_InitSha224(wc_Sha224* sha224)
- {
- return wc_InitSha224_ex(sha224, NULL, INVALID_DEVID);
- }
- int wc_Sha224Update(wc_Sha224* sha224, const byte* data, word32 len)
- {
- int ret;
- if (sha224 == NULL || (data == NULL && len > 0)) {
- return BAD_FUNC_ARG;
- }
- ret = Sha256Update((wc_Sha256 *)sha224, data, len);
- return ret;
- }
- int wc_Sha224Final(wc_Sha224* sha224, byte* hash)
- {
- int ret;
- word32 hashTmp[WC_SHA256_DIGEST_SIZE/sizeof(word32)];
- if (sha224 == NULL || hash == NULL) {
- return BAD_FUNC_ARG;
- }
- ret = Sha256Final((wc_Sha256*)sha224, (byte*)hashTmp);
- if (ret != 0)
- return ret;
- XMEMCPY(hash, hashTmp, WC_SHA224_DIGEST_SIZE);
- return InitSha224(sha224); /* reset state */
- }
- void wc_Sha224Free(wc_Sha224* sha224)
- {
- if (sha224 == NULL)
- return;
- }
- int wc_Sha224GetHash(wc_Sha224* sha224, byte* hash)
- {
- int ret;
- wc_Sha224 tmpSha224;
- if (sha224 == NULL || hash == NULL)
- return BAD_FUNC_ARG;
- ret = wc_Sha224Copy(sha224, &tmpSha224);
- if (ret == 0) {
- ret = wc_Sha224Final(&tmpSha224, hash);
- }
- return ret;
- }
- #if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
- int wc_Sha224SetFlags(wc_Sha224* sha224, word32 flags)
- {
- if (sha224) {
- sha224->flags = flags;
- }
- return 0;
- }
- int wc_Sha224GetFlags(wc_Sha224* sha224, word32* flags)
- {
- if (sha224 && flags) {
- *flags = sha224->flags;
- }
- return 0;
- }
- #endif
- int wc_Sha224Copy(wc_Sha224* src, wc_Sha224* dst)
- {
- int ret = 0;
- if (src == NULL || dst == NULL)
- return BAD_FUNC_ARG;
- XMEMCPY(dst, src, sizeof(wc_Sha224));
- return ret;
- }
- #endif /* WOLFSSL_SHA224 */
- #endif /* !NO_SHA256 || WOLFSSL_SHA224 */
- #endif /* WOLFSSL_ARMASM */
|