/*
 * Copyright 2010-2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <string.h>
#include <openssl/crypto.h>
#include "internal/cryptlib.h"
#include "internal/endian.h"
#include "crypto/modes.h"

#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
typedef size_t size_t_aX __attribute((__aligned__(1)));
#else
typedef size_t size_t_aX;
#endif

#if defined(BSWAP4) && defined(STRICT_ALIGNMENT)
/* redefine, because alignment is ensured */
# undef GETU32
# define GETU32(p)      BSWAP4(*(const u32 *)(p))
# undef PUTU32
# define PUTU32(p,v)    *(u32 *)(p) = BSWAP4(v)
#endif

#define PACK(s)         ((size_t)(s)<<(sizeof(size_t)*8-16))
#define REDUCE1BIT(V)   do { \
        if (sizeof(size_t)==8) { \
                u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); \
                V.lo = (V.hi<<63)|(V.lo>>1); \
                V.hi = (V.hi>>1 )^T; \
        } \
        else { \
                u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); \
                V.lo = (V.hi<<63)|(V.lo>>1); \
                V.hi = (V.hi>>1 )^((u64)T<<32); \
        } \
} while(0)
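
/*
 * Commentary: REDUCE1BIT multiplies the field element V by x in GHASH's
 * bit-reflected representation of GF(2^128), reducing modulo the GCM
 * polynomial x^128 + x^7 + x^2 + x + 1. In this representation multiplying
 * by x is a one-bit right shift of the 128-bit value; whenever the bit
 * shifted out was set, the constant U64(0xe100000000000000) is XORed into
 * the top, since 0xE1 = 11100001b is exactly x^7 + x^2 + x + 1 with its
 * coefficients reflected into the most significant byte.
 */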
/*-
 * Even though the permitted values for TABLE_BITS are 8, 4 and 1, it
 * should never be set to 8: 8 is effectively reserved for testing
 * purposes. TABLE_BITS>1 selects the lookup-table-driven implementations
 * referred to as "Shoup's" in the GCM specification; in other words
 * OpenSSL does not cover the whole spectrum of possible table-driven
 * implementations. Why? In the non-"Shoup's" case the memory access
 * pattern is segmented in such a manner that it is trivial to see that
 * cache-timing information can reveal a fair portion of the intermediate
 * hash value. Given that the ciphertext is always available to an
 * attacker, the attacker can attempt to deduce the secret parameter H
 * and, if successful, tamper with messages [which is nothing but trivial
 * in CTR mode]. In the "Shoup's" case this is not as trivial, but there
 * is no reason to believe that it is resistant to cache-timing attacks
 * either. The thing about the "8-bit" implementation is that it consumes
 * 16 (sixteen) times more memory, 4KB per individual key + 1KB shared.
 * On the pros side, it should be about twice as fast as the "4-bit"
 * version; for gcc-generated x86[_64] code the "8-bit" version was
 * observed to run ~75% faster, closer to 100% for commercial
 * compilers... Yet the "4-bit" procedure is preferred, because it is
 * believed to provide a better security-performance balance and adequate
 * all-round performance. "All-round" refers to things like:
 *
 * - shorter setup time effectively improves overall timing for
 *   handling short messages;
 * - larger table allocation can become unbearable because of VM
 *   subsystem penalties (for example on Windows a large enough free()
 *   results in VM working set trimming, meaning that a subsequent
 *   malloc() would immediately incur working set expansion);
 * - a larger table has a larger cache footprint, which can affect the
 *   performance of other code paths (not necessarily even from the same
 *   thread in a Hyper-Threading world);
 *
 * A value of 1 is not appropriate for performance reasons.
 */
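
/*
 * To put the numbers above in concrete terms: with 16-byte u128 entries
 * the "4-bit" Htable is 16*16 = 256 bytes per key, while the "8-bit"
 * Htable is 256*16 = 4KB per key, the sixteen-fold difference, plus a
 * shared 256-entry rem_8bit table of 256*sizeof(size_t) bytes (the 1KB
 * figure above assumes a 32-bit size_t; it doubles on 64-bit targets).
 */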
#if TABLE_BITS==8

static void gcm_init_8bit(u128 Htable[256], u64 H[2])
{
    int i, j;
    u128 V;

    Htable[0].hi = 0;
    Htable[0].lo = 0;
    V.hi = H[0];
    V.lo = H[1];

    for (Htable[128] = V, i = 64; i > 0; i >>= 1) {
        REDUCE1BIT(V);
        Htable[i] = V;
    }

    for (i = 2; i < 256; i <<= 1) {
        u128 *Hi = Htable + i, H0 = *Hi;

        for (j = 1; j < i; ++j) {
            Hi[j].hi = H0.hi ^ Htable[j].hi;
            Hi[j].lo = H0.lo ^ Htable[j].lo;
        }
    }
}

static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256])
{
    u128 Z = { 0, 0 };
    const u8 *xi = (const u8 *)Xi + 15;
    size_t rem, n = *xi;
    DECLARE_IS_ENDIAN;
    static const size_t rem_8bit[256] = {
        PACK(0x0000), PACK(0x01C2), PACK(0x0384), PACK(0x0246),
        PACK(0x0708), PACK(0x06CA), PACK(0x048C), PACK(0x054E),
        PACK(0x0E10), PACK(0x0FD2), PACK(0x0D94), PACK(0x0C56),
        PACK(0x0918), PACK(0x08DA), PACK(0x0A9C), PACK(0x0B5E),
        PACK(0x1C20), PACK(0x1DE2), PACK(0x1FA4), PACK(0x1E66),
        PACK(0x1B28), PACK(0x1AEA), PACK(0x18AC), PACK(0x196E),
        PACK(0x1230), PACK(0x13F2), PACK(0x11B4), PACK(0x1076),
        PACK(0x1538), PACK(0x14FA), PACK(0x16BC), PACK(0x177E),
        PACK(0x3840), PACK(0x3982), PACK(0x3BC4), PACK(0x3A06),
        PACK(0x3F48), PACK(0x3E8A), PACK(0x3CCC), PACK(0x3D0E),
        PACK(0x3650), PACK(0x3792), PACK(0x35D4), PACK(0x3416),
        PACK(0x3158), PACK(0x309A), PACK(0x32DC), PACK(0x331E),
        PACK(0x2460), PACK(0x25A2), PACK(0x27E4), PACK(0x2626),
        PACK(0x2368), PACK(0x22AA), PACK(0x20EC), PACK(0x212E),
        PACK(0x2A70), PACK(0x2BB2), PACK(0x29F4), PACK(0x2836),
        PACK(0x2D78), PACK(0x2CBA), PACK(0x2EFC), PACK(0x2F3E),
        PACK(0x7080), PACK(0x7142), PACK(0x7304), PACK(0x72C6),
        PACK(0x7788), PACK(0x764A), PACK(0x740C), PACK(0x75CE),
        PACK(0x7E90), PACK(0x7F52), PACK(0x7D14), PACK(0x7CD6),
        PACK(0x7998), PACK(0x785A), PACK(0x7A1C), PACK(0x7BDE),
        PACK(0x6CA0), PACK(0x6D62), PACK(0x6F24), PACK(0x6EE6),
        PACK(0x6BA8), PACK(0x6A6A), PACK(0x682C), PACK(0x69EE),
        PACK(0x62B0), PACK(0x6372), PACK(0x6134), PACK(0x60F6),
        PACK(0x65B8), PACK(0x647A), PACK(0x663C), PACK(0x67FE),
        PACK(0x48C0), PACK(0x4902), PACK(0x4B44), PACK(0x4A86),
        PACK(0x4FC8), PACK(0x4E0A), PACK(0x4C4C), PACK(0x4D8E),
        PACK(0x46D0), PACK(0x4712), PACK(0x4554), PACK(0x4496),
        PACK(0x41D8), PACK(0x401A), PACK(0x425C), PACK(0x439E),
        PACK(0x54E0), PACK(0x5522), PACK(0x5764), PACK(0x56A6),
        PACK(0x53E8), PACK(0x522A), PACK(0x506C), PACK(0x51AE),
        PACK(0x5AF0), PACK(0x5B32), PACK(0x5974), PACK(0x58B6),
        PACK(0x5DF8), PACK(0x5C3A), PACK(0x5E7C), PACK(0x5FBE),
        PACK(0xE100), PACK(0xE0C2), PACK(0xE284), PACK(0xE346),
        PACK(0xE608), PACK(0xE7CA), PACK(0xE58C), PACK(0xE44E),
        PACK(0xEF10), PACK(0xEED2), PACK(0xEC94), PACK(0xED56),
        PACK(0xE818), PACK(0xE9DA), PACK(0xEB9C), PACK(0xEA5E),
        PACK(0xFD20), PACK(0xFCE2), PACK(0xFEA4), PACK(0xFF66),
        PACK(0xFA28), PACK(0xFBEA), PACK(0xF9AC), PACK(0xF86E),
        PACK(0xF330), PACK(0xF2F2), PACK(0xF0B4), PACK(0xF176),
        PACK(0xF438), PACK(0xF5FA), PACK(0xF7BC), PACK(0xF67E),
        PACK(0xD940), PACK(0xD882), PACK(0xDAC4), PACK(0xDB06),
        PACK(0xDE48), PACK(0xDF8A), PACK(0xDDCC), PACK(0xDC0E),
        PACK(0xD750), PACK(0xD692), PACK(0xD4D4), PACK(0xD516),
        PACK(0xD058), PACK(0xD19A), PACK(0xD3DC), PACK(0xD21E),
        PACK(0xC560), PACK(0xC4A2), PACK(0xC6E4), PACK(0xC726),
        PACK(0xC268), PACK(0xC3AA), PACK(0xC1EC), PACK(0xC02E),
        PACK(0xCB70), PACK(0xCAB2), PACK(0xC8F4), PACK(0xC936),
        PACK(0xCC78), PACK(0xCDBA), PACK(0xCFFC), PACK(0xCE3E),
        PACK(0x9180), PACK(0x9042), PACK(0x9204), PACK(0x93C6),
        PACK(0x9688), PACK(0x974A), PACK(0x950C), PACK(0x94CE),
        PACK(0x9F90), PACK(0x9E52), PACK(0x9C14), PACK(0x9DD6),
        PACK(0x9898), PACK(0x995A), PACK(0x9B1C), PACK(0x9ADE),
        PACK(0x8DA0), PACK(0x8C62), PACK(0x8E24), PACK(0x8FE6),
        PACK(0x8AA8), PACK(0x8B6A), PACK(0x892C), PACK(0x88EE),
        PACK(0x83B0), PACK(0x8272), PACK(0x8034), PACK(0x81F6),
        PACK(0x84B8), PACK(0x857A), PACK(0x873C), PACK(0x86FE),
        PACK(0xA9C0), PACK(0xA802), PACK(0xAA44), PACK(0xAB86),
        PACK(0xAEC8), PACK(0xAF0A), PACK(0xAD4C), PACK(0xAC8E),
        PACK(0xA7D0), PACK(0xA612), PACK(0xA454), PACK(0xA596),
        PACK(0xA0D8), PACK(0xA11A), PACK(0xA35C), PACK(0xA29E),
        PACK(0xB5E0), PACK(0xB422), PACK(0xB664), PACK(0xB7A6),
        PACK(0xB2E8), PACK(0xB32A), PACK(0xB16C), PACK(0xB0AE),
        PACK(0xBBF0), PACK(0xBA32), PACK(0xB874), PACK(0xB9B6),
        PACK(0xBCF8), PACK(0xBD3A), PACK(0xBF7C), PACK(0xBEBE)
    };

    while (1) {
        Z.hi ^= Htable[n].hi;
        Z.lo ^= Htable[n].lo;

        if ((u8 *)Xi == xi)
            break;

        n = *(--xi);

        rem = (size_t)Z.lo & 0xff;
        Z.lo = (Z.hi << 56) | (Z.lo >> 8);
        Z.hi = (Z.hi >> 8);
        if (sizeof(size_t) == 8)
            Z.hi ^= rem_8bit[rem];
        else
            Z.hi ^= (u64)rem_8bit[rem] << 32;
    }

    if (IS_LITTLE_ENDIAN) {
# ifdef BSWAP8
        Xi[0] = BSWAP8(Z.hi);
        Xi[1] = BSWAP8(Z.lo);
# else
        u8 *p = (u8 *)Xi;
        u32 v;
        v = (u32)(Z.hi >> 32);
        PUTU32(p, v);
        v = (u32)(Z.hi);
        PUTU32(p + 4, v);
        v = (u32)(Z.lo >> 32);
        PUTU32(p + 8, v);
        v = (u32)(Z.lo);
        PUTU32(p + 12, v);
# endif
    } else {
        Xi[0] = Z.hi;
        Xi[1] = Z.lo;
    }
}

# define GCM_MUL(ctx)   gcm_gmult_8bit(ctx->Xi.u,ctx->Htable)

#elif TABLE_BITS==4

static void gcm_init_4bit(u128 Htable[16], u64 H[2])
{
    u128 V;
# if defined(OPENSSL_SMALL_FOOTPRINT)
    int i;
# endif

    Htable[0].hi = 0;
    Htable[0].lo = 0;
    V.hi = H[0];
    V.lo = H[1];

# if defined(OPENSSL_SMALL_FOOTPRINT)
    for (Htable[8] = V, i = 4; i > 0; i >>= 1) {
        REDUCE1BIT(V);
        Htable[i] = V;
    }

    for (i = 2; i < 16; i <<= 1) {
        u128 *Hi = Htable + i;
        int j;

        for (V = *Hi, j = 1; j < i; ++j) {
            Hi[j].hi = V.hi ^ Htable[j].hi;
            Hi[j].lo = V.lo ^ Htable[j].lo;
        }
    }
# else
    Htable[8] = V;
    REDUCE1BIT(V);
    Htable[4] = V;
    REDUCE1BIT(V);
    Htable[2] = V;
    REDUCE1BIT(V);
    Htable[1] = V;
    Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
    V = Htable[4];
    Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
    Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
    Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
    V = Htable[8];
    Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
    Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo;
    Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo;
    Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo;
    Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo;
    Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo;
    Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo;
# endif
# if defined(GHASH_ASM) && (defined(__arm__) || defined(__arm))
    /*
     * ARM assembler expects specific dword order in Htable.
     */
    {
        int j;
        DECLARE_IS_ENDIAN;

        if (IS_LITTLE_ENDIAN)
            for (j = 0; j < 16; ++j) {
                V = Htable[j];
                Htable[j].hi = V.lo;
                Htable[j].lo = V.hi;
            }
        else
            for (j = 0; j < 16; ++j) {
                V = Htable[j];
                Htable[j].hi = V.lo << 32 | V.lo >> 32;
                Htable[j].lo = V.hi << 32 | V.hi >> 32;
            }
    }
# endif
}
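
/*
 * After gcm_init_4bit, Htable[i] holds the product i*H in GF(2^128),
 * with the 4-bit index read in GHASH's reflected bit order: index 8
 * (binary 1000) is the unit element, so Htable[8] = H, Htable[4] = H*x,
 * Htable[2] = H*x^2 and Htable[1] = H*x^3. The remaining entries follow
 * by linearity over XOR, e.g. Htable[3] = Htable[2] ^ Htable[1].
 */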
# ifndef GHASH_ASM
static const size_t rem_4bit[16] = {
    PACK(0x0000), PACK(0x1C20), PACK(0x3840), PACK(0x2460),
    PACK(0x7080), PACK(0x6CA0), PACK(0x48C0), PACK(0x54E0),
    PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
    PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0)
};

static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
{
    u128 Z;
    int cnt = 15;
    size_t rem, nlo, nhi;
    DECLARE_IS_ENDIAN;

    nlo = ((const u8 *)Xi)[15];
    nhi = nlo >> 4;
    nlo &= 0xf;

    Z.hi = Htable[nlo].hi;
    Z.lo = Htable[nlo].lo;

    while (1) {
        rem = (size_t)Z.lo & 0xf;
        Z.lo = (Z.hi << 60) | (Z.lo >> 4);
        Z.hi = (Z.hi >> 4);
        if (sizeof(size_t) == 8)
            Z.hi ^= rem_4bit[rem];
        else
            Z.hi ^= (u64)rem_4bit[rem] << 32;

        Z.hi ^= Htable[nhi].hi;
        Z.lo ^= Htable[nhi].lo;

        if (--cnt < 0)
            break;

        nlo = ((const u8 *)Xi)[cnt];
        nhi = nlo >> 4;
        nlo &= 0xf;

        rem = (size_t)Z.lo & 0xf;
        Z.lo = (Z.hi << 60) | (Z.lo >> 4);
        Z.hi = (Z.hi >> 4);
        if (sizeof(size_t) == 8)
            Z.hi ^= rem_4bit[rem];
        else
            Z.hi ^= (u64)rem_4bit[rem] << 32;

        Z.hi ^= Htable[nlo].hi;
        Z.lo ^= Htable[nlo].lo;
    }

    if (IS_LITTLE_ENDIAN) {
# ifdef BSWAP8
        Xi[0] = BSWAP8(Z.hi);
        Xi[1] = BSWAP8(Z.lo);
# else
        u8 *p = (u8 *)Xi;
        u32 v;
        v = (u32)(Z.hi >> 32);
        PUTU32(p, v);
        v = (u32)(Z.hi);
        PUTU32(p + 4, v);
        v = (u32)(Z.lo >> 32);
        PUTU32(p + 8, v);
        v = (u32)(Z.lo);
        PUTU32(p + 12, v);
# endif
    } else {
        Xi[0] = Z.hi;
        Xi[1] = Z.lo;
    }
}

# if !defined(OPENSSL_SMALL_FOOTPRINT)
/*
 * Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for
 * details... Compiler-generated code doesn't seem to give any
 * performance improvement, at least not on x86[_64]. It's here
 * mostly as reference and a placeholder for possible future
 * non-trivial optimization[s]...
 */
static void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
                           const u8 *inp, size_t len)
{
    u128 Z;
    int cnt;
    size_t rem, nlo, nhi;
    DECLARE_IS_ENDIAN;

# if 1
    do {
        cnt = 15;
        nlo = ((const u8 *)Xi)[15];
        nlo ^= inp[15];
        nhi = nlo >> 4;
        nlo &= 0xf;

        Z.hi = Htable[nlo].hi;
        Z.lo = Htable[nlo].lo;

        while (1) {
            rem = (size_t)Z.lo & 0xf;
            Z.lo = (Z.hi << 60) | (Z.lo >> 4);
            Z.hi = (Z.hi >> 4);
            if (sizeof(size_t) == 8)
                Z.hi ^= rem_4bit[rem];
            else
                Z.hi ^= (u64)rem_4bit[rem] << 32;

            Z.hi ^= Htable[nhi].hi;
            Z.lo ^= Htable[nhi].lo;

            if (--cnt < 0)
                break;

            nlo = ((const u8 *)Xi)[cnt];
            nlo ^= inp[cnt];
            nhi = nlo >> 4;
            nlo &= 0xf;

            rem = (size_t)Z.lo & 0xf;
            Z.lo = (Z.hi << 60) | (Z.lo >> 4);
            Z.hi = (Z.hi >> 4);
            if (sizeof(size_t) == 8)
                Z.hi ^= rem_4bit[rem];
            else
                Z.hi ^= (u64)rem_4bit[rem] << 32;

            Z.hi ^= Htable[nlo].hi;
            Z.lo ^= Htable[nlo].lo;
        }
# else
    /*
     * Extra 256+16 bytes per-key plus 512 bytes shared tables
     * [should] give ~50% improvement... One could have PACK()-ed
     * the rem_8bit even here, but the priority is to minimize
     * cache footprint...
     */
    u128 Hshr4[16];             /* Htable shifted right by 4 bits */
    u8 Hshl4[16];               /* Htable shifted left by 4 bits */
    static const unsigned short rem_8bit[256] = {
        0x0000, 0x01C2, 0x0384, 0x0246, 0x0708, 0x06CA, 0x048C, 0x054E,
        0x0E10, 0x0FD2, 0x0D94, 0x0C56, 0x0918, 0x08DA, 0x0A9C, 0x0B5E,
        0x1C20, 0x1DE2, 0x1FA4, 0x1E66, 0x1B28, 0x1AEA, 0x18AC, 0x196E,
        0x1230, 0x13F2, 0x11B4, 0x1076, 0x1538, 0x14FA, 0x16BC, 0x177E,
        0x3840, 0x3982, 0x3BC4, 0x3A06, 0x3F48, 0x3E8A, 0x3CCC, 0x3D0E,
        0x3650, 0x3792, 0x35D4, 0x3416, 0x3158, 0x309A, 0x32DC, 0x331E,
        0x2460, 0x25A2, 0x27E4, 0x2626, 0x2368, 0x22AA, 0x20EC, 0x212E,
        0x2A70, 0x2BB2, 0x29F4, 0x2836, 0x2D78, 0x2CBA, 0x2EFC, 0x2F3E,
        0x7080, 0x7142, 0x7304, 0x72C6, 0x7788, 0x764A, 0x740C, 0x75CE,
        0x7E90, 0x7F52, 0x7D14, 0x7CD6, 0x7998, 0x785A, 0x7A1C, 0x7BDE,
        0x6CA0, 0x6D62, 0x6F24, 0x6EE6, 0x6BA8, 0x6A6A, 0x682C, 0x69EE,
        0x62B0, 0x6372, 0x6134, 0x60F6, 0x65B8, 0x647A, 0x663C, 0x67FE,
        0x48C0, 0x4902, 0x4B44, 0x4A86, 0x4FC8, 0x4E0A, 0x4C4C, 0x4D8E,
        0x46D0, 0x4712, 0x4554, 0x4496, 0x41D8, 0x401A, 0x425C, 0x439E,
        0x54E0, 0x5522, 0x5764, 0x56A6, 0x53E8, 0x522A, 0x506C, 0x51AE,
        0x5AF0, 0x5B32, 0x5974, 0x58B6, 0x5DF8, 0x5C3A, 0x5E7C, 0x5FBE,
        0xE100, 0xE0C2, 0xE284, 0xE346, 0xE608, 0xE7CA, 0xE58C, 0xE44E,
        0xEF10, 0xEED2, 0xEC94, 0xED56, 0xE818, 0xE9DA, 0xEB9C, 0xEA5E,
        0xFD20, 0xFCE2, 0xFEA4, 0xFF66, 0xFA28, 0xFBEA, 0xF9AC, 0xF86E,
        0xF330, 0xF2F2, 0xF0B4, 0xF176, 0xF438, 0xF5FA, 0xF7BC, 0xF67E,
        0xD940, 0xD882, 0xDAC4, 0xDB06, 0xDE48, 0xDF8A, 0xDDCC, 0xDC0E,
        0xD750, 0xD692, 0xD4D4, 0xD516, 0xD058, 0xD19A, 0xD3DC, 0xD21E,
        0xC560, 0xC4A2, 0xC6E4, 0xC726, 0xC268, 0xC3AA, 0xC1EC, 0xC02E,
        0xCB70, 0xCAB2, 0xC8F4, 0xC936, 0xCC78, 0xCDBA, 0xCFFC, 0xCE3E,
        0x9180, 0x9042, 0x9204, 0x93C6, 0x9688, 0x974A, 0x950C, 0x94CE,
        0x9F90, 0x9E52, 0x9C14, 0x9DD6, 0x9898, 0x995A, 0x9B1C, 0x9ADE,
        0x8DA0, 0x8C62, 0x8E24, 0x8FE6, 0x8AA8, 0x8B6A, 0x892C, 0x88EE,
        0x83B0, 0x8272, 0x8034, 0x81F6, 0x84B8, 0x857A, 0x873C, 0x86FE,
        0xA9C0, 0xA802, 0xAA44, 0xAB86, 0xAEC8, 0xAF0A, 0xAD4C, 0xAC8E,
        0xA7D0, 0xA612, 0xA454, 0xA596, 0xA0D8, 0xA11A, 0xA35C, 0xA29E,
        0xB5E0, 0xB422, 0xB664, 0xB7A6, 0xB2E8, 0xB32A, 0xB16C, 0xB0AE,
        0xBBF0, 0xBA32, 0xB874, 0xB9B6, 0xBCF8, 0xBD3A, 0xBF7C, 0xBEBE
    };
    /*
     * This pre-processing phase slows the procedure down by approximately
     * the same amount of time as it saves in each loop iteration. In
     * other words, single-block performance is approximately the same as
     * for the straightforward "4-bit" implementation, and from there on
     * it only gets faster...
     */
    for (cnt = 0; cnt < 16; ++cnt) {
        Z.hi = Htable[cnt].hi;
        Z.lo = Htable[cnt].lo;
        Hshr4[cnt].lo = (Z.hi << 60) | (Z.lo >> 4);
        Hshr4[cnt].hi = (Z.hi >> 4);
        Hshl4[cnt] = (u8)(Z.lo << 4);
    }

    do {
        for (Z.lo = 0, Z.hi = 0, cnt = 15; cnt; --cnt) {
            nlo = ((const u8 *)Xi)[cnt];
            nlo ^= inp[cnt];
            nhi = nlo >> 4;
            nlo &= 0xf;

            Z.hi ^= Htable[nlo].hi;
            Z.lo ^= Htable[nlo].lo;

            rem = (size_t)Z.lo & 0xff;

            Z.lo = (Z.hi << 56) | (Z.lo >> 8);
            Z.hi = (Z.hi >> 8);

            Z.hi ^= Hshr4[nhi].hi;
            Z.lo ^= Hshr4[nhi].lo;
            Z.hi ^= (u64)rem_8bit[rem ^ Hshl4[nhi]] << 48;
        }

        nlo = ((const u8 *)Xi)[0];
        nlo ^= inp[0];
        nhi = nlo >> 4;
        nlo &= 0xf;

        Z.hi ^= Htable[nlo].hi;
        Z.lo ^= Htable[nlo].lo;

        rem = (size_t)Z.lo & 0xf;

        Z.lo = (Z.hi << 60) | (Z.lo >> 4);
        Z.hi = (Z.hi >> 4);

        Z.hi ^= Htable[nhi].hi;
        Z.lo ^= Htable[nhi].lo;

        Z.hi ^= ((u64)rem_8bit[rem << 4]) << 48;
# endif

        if (IS_LITTLE_ENDIAN) {
# ifdef BSWAP8
            Xi[0] = BSWAP8(Z.hi);
            Xi[1] = BSWAP8(Z.lo);
# else
            u8 *p = (u8 *)Xi;
            u32 v;
            v = (u32)(Z.hi >> 32);
            PUTU32(p, v);
            v = (u32)(Z.hi);
            PUTU32(p + 4, v);
            v = (u32)(Z.lo >> 32);
            PUTU32(p + 8, v);
            v = (u32)(Z.lo);
            PUTU32(p + 12, v);
# endif
        } else {
            Xi[0] = Z.hi;
            Xi[1] = Z.lo;
        }
    } while (inp += 16, len -= 16);
}
# endif
# else
void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                    size_t len);
# endif

# define GCM_MUL(ctx)   gcm_gmult_4bit(ctx->Xi.u,ctx->Htable)
# if defined(GHASH_ASM) || !defined(OPENSSL_SMALL_FOOTPRINT)
#  define GHASH(ctx,in,len) gcm_ghash_4bit((ctx)->Xi.u,(ctx)->Htable,in,len)
/*
 * GHASH_CHUNK is a "stride parameter" whose mission is to mitigate
 * cache-thrashing effects. In other words the idea is to hash data while
 * it's still in L1 cache after the encryption pass...
 */
#  define GHASH_CHUNK       (3*1024)
# endif
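
/*
 * With the default value, each stride covers GHASH_CHUNK/16 = 192 counter
 * blocks: 3KB of output is produced and then hashed while those bytes are
 * still resident in L1 cache.
 */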
#else                           /* TABLE_BITS */

static void gcm_gmult_1bit(u64 Xi[2], const u64 H[2])
{
    u128 V, Z = { 0, 0 };
    long X;
    int i, j;
    const long *xi = (const long *)Xi;
    DECLARE_IS_ENDIAN;

    V.hi = H[0];                /* H is in host byte order, no byte swapping */
    V.lo = H[1];

    for (j = 0; j < 16 / sizeof(long); ++j) {
        if (IS_LITTLE_ENDIAN) {
            if (sizeof(long) == 8) {
# ifdef BSWAP8
                X = (long)(BSWAP8(xi[j]));
# else
                const u8 *p = (const u8 *)(xi + j);
                X = (long)((u64)GETU32(p) << 32 | GETU32(p + 4));
# endif
            } else {
                const u8 *p = (const u8 *)(xi + j);
                X = (long)GETU32(p);
            }
        } else
            X = xi[j];

        for (i = 0; i < 8 * sizeof(long); ++i, X <<= 1) {
            u64 M = (u64)(X >> (8 * sizeof(long) - 1));

            Z.hi ^= V.hi & M;
            Z.lo ^= V.lo & M;

            REDUCE1BIT(V);
        }
    }

    if (IS_LITTLE_ENDIAN) {
# ifdef BSWAP8
        Xi[0] = BSWAP8(Z.hi);
        Xi[1] = BSWAP8(Z.lo);
# else
        u8 *p = (u8 *)Xi;
        u32 v;
        v = (u32)(Z.hi >> 32);
        PUTU32(p, v);
        v = (u32)(Z.hi);
        PUTU32(p + 4, v);
        v = (u32)(Z.lo >> 32);
        PUTU32(p + 8, v);
        v = (u32)(Z.lo);
        PUTU32(p + 12, v);
# endif
    } else {
        Xi[0] = Z.hi;
        Xi[1] = Z.lo;
    }
}

# define GCM_MUL(ctx)   gcm_gmult_1bit(ctx->Xi.u,ctx->H.u)

#endif
#if TABLE_BITS==4 && (defined(GHASH_ASM) || defined(OPENSSL_CPUID_OBJ))
# if !defined(I386_ONLY) && \
     (defined(__i386) || defined(__i386__) || \
      defined(__x86_64) || defined(__x86_64__) || \
      defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
#  define GHASH_ASM_X86_OR_64
#  define GCM_FUNCREF_4BIT

void gcm_init_clmul(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_clmul(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_clmul(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                     size_t len);

#  if defined(__i386) || defined(__i386__) || defined(_M_IX86)
#   define gcm_init_avx   gcm_init_clmul
#   define gcm_gmult_avx  gcm_gmult_clmul
#   define gcm_ghash_avx  gcm_ghash_clmul
#  else
void gcm_init_avx(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_avx(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_avx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                   size_t len);
#  endif

#  if defined(__i386) || defined(__i386__) || defined(_M_IX86)
#   define GHASH_ASM_X86
void gcm_gmult_4bit_mmx(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_mmx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                        size_t len);

void gcm_gmult_4bit_x86(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_x86(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                        size_t len);
#  endif
# elif defined(__arm__) || defined(__arm) || defined(__aarch64__)
#  include "arm_arch.h"
#  if __ARM_MAX_ARCH__>=7
#   define GHASH_ASM_ARM
#   define GCM_FUNCREF_4BIT
#   define PMULL_CAPABLE        (OPENSSL_armcap_P & ARMV8_PMULL)
#   if defined(__arm__) || defined(__arm)
#    define NEON_CAPABLE        (OPENSSL_armcap_P & ARMV7_NEON)
#   endif
void gcm_init_neon(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                    size_t len);
void gcm_init_v8(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                  size_t len);
#  endif
# elif defined(__sparc__) || defined(__sparc)
#  include "sparc_arch.h"
#  define GHASH_ASM_SPARC
#  define GCM_FUNCREF_4BIT
extern unsigned int OPENSSL_sparcv9cap_P[];
void gcm_init_vis3(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_vis3(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_vis3(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                    size_t len);
# elif defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
#  include "ppc_arch.h"
#  define GHASH_ASM_PPC
#  define GCM_FUNCREF_4BIT
void gcm_init_p8(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_p8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                  size_t len);
# endif
#endif

#ifdef GCM_FUNCREF_4BIT
# undef GCM_MUL
# define GCM_MUL(ctx)           (*gcm_gmult_p)(ctx->Xi.u,ctx->Htable)
# ifdef GHASH
#  undef GHASH
#  define GHASH(ctx,in,len)     (*gcm_ghash_p)(ctx->Xi.u,ctx->Htable,in,len)
# endif
#endif
void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
{
    DECLARE_IS_ENDIAN;

    memset(ctx, 0, sizeof(*ctx));
    ctx->block = block;
    ctx->key = key;

    (*block) (ctx->H.c, ctx->H.c, key);

    if (IS_LITTLE_ENDIAN) {
        /* H is stored in host byte order */
#ifdef BSWAP8
        ctx->H.u[0] = BSWAP8(ctx->H.u[0]);
        ctx->H.u[1] = BSWAP8(ctx->H.u[1]);
#else
        u8 *p = ctx->H.c;
        u64 hi, lo;

        hi = (u64)GETU32(p) << 32 | GETU32(p + 4);
        lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
        ctx->H.u[0] = hi;
        ctx->H.u[1] = lo;
#endif
    }
#if TABLE_BITS==8
    gcm_init_8bit(ctx->Htable, ctx->H.u);
#elif TABLE_BITS==4
# if defined(GHASH)
#  define CTX__GHASH(f) (ctx->ghash = (f))
# else
#  define CTX__GHASH(f) (ctx->ghash = NULL)
# endif
# if defined(GHASH_ASM_X86_OR_64)
#  if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)
    if (OPENSSL_ia32cap_P[1] & (1 << 1)) { /* check PCLMULQDQ bit */
        if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */
            gcm_init_avx(ctx->Htable, ctx->H.u);
            ctx->gmult = gcm_gmult_avx;
            CTX__GHASH(gcm_ghash_avx);
        } else {
            gcm_init_clmul(ctx->Htable, ctx->H.u);
            ctx->gmult = gcm_gmult_clmul;
            CTX__GHASH(gcm_ghash_clmul);
        }
        return;
    }
#  endif
    gcm_init_4bit(ctx->Htable, ctx->H.u);
#  if defined(GHASH_ASM_X86)    /* x86 only */
#   if defined(OPENSSL_IA32_SSE2)
    if (OPENSSL_ia32cap_P[0] & (1 << 25)) { /* check SSE bit */
#   else
    if (OPENSSL_ia32cap_P[0] & (1 << 23)) { /* check MMX bit */
#   endif
        ctx->gmult = gcm_gmult_4bit_mmx;
        CTX__GHASH(gcm_ghash_4bit_mmx);
    } else {
        ctx->gmult = gcm_gmult_4bit_x86;
        CTX__GHASH(gcm_ghash_4bit_x86);
    }
#  else
    ctx->gmult = gcm_gmult_4bit;
    CTX__GHASH(gcm_ghash_4bit);
#  endif
# elif defined(GHASH_ASM_ARM)
#  ifdef PMULL_CAPABLE
    if (PMULL_CAPABLE) {
        gcm_init_v8(ctx->Htable, ctx->H.u);
        ctx->gmult = gcm_gmult_v8;
        CTX__GHASH(gcm_ghash_v8);
    } else
#  endif
#  ifdef NEON_CAPABLE
    if (NEON_CAPABLE) {
        gcm_init_neon(ctx->Htable, ctx->H.u);
        ctx->gmult = gcm_gmult_neon;
        CTX__GHASH(gcm_ghash_neon);
    } else
#  endif
    {
        gcm_init_4bit(ctx->Htable, ctx->H.u);
        ctx->gmult = gcm_gmult_4bit;
        CTX__GHASH(gcm_ghash_4bit);
    }
# elif defined(GHASH_ASM_SPARC)
    if (OPENSSL_sparcv9cap_P[0] & SPARCV9_VIS3) {
        gcm_init_vis3(ctx->Htable, ctx->H.u);
        ctx->gmult = gcm_gmult_vis3;
        CTX__GHASH(gcm_ghash_vis3);
    } else {
        gcm_init_4bit(ctx->Htable, ctx->H.u);
        ctx->gmult = gcm_gmult_4bit;
        CTX__GHASH(gcm_ghash_4bit);
    }
# elif defined(GHASH_ASM_PPC)
    if (OPENSSL_ppccap_P & PPC_CRYPTO207) {
        gcm_init_p8(ctx->Htable, ctx->H.u);
        ctx->gmult = gcm_gmult_p8;
        CTX__GHASH(gcm_ghash_p8);
    } else {
        gcm_init_4bit(ctx->Htable, ctx->H.u);
        ctx->gmult = gcm_gmult_4bit;
        CTX__GHASH(gcm_ghash_4bit);
    }
# else
    gcm_init_4bit(ctx->Htable, ctx->H.u);
# endif
# undef CTX__GHASH
#endif
}
void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv,
                         size_t len)
{
    DECLARE_IS_ENDIAN;
    unsigned int ctr;
#ifdef GCM_FUNCREF_4BIT
    void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
#endif

    ctx->len.u[0] = 0;          /* AAD length */
    ctx->len.u[1] = 0;          /* message length */
    ctx->ares = 0;
    ctx->mres = 0;

    if (len == 12) {
        memcpy(ctx->Yi.c, iv, 12);
        ctx->Yi.c[12] = 0;
        ctx->Yi.c[13] = 0;
        ctx->Yi.c[14] = 0;
        ctx->Yi.c[15] = 1;
        ctr = 1;
    } else {
        size_t i;
        u64 len0 = len;

        /* Borrow ctx->Xi to calculate initial Yi */
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;

        while (len >= 16) {
            for (i = 0; i < 16; ++i)
                ctx->Xi.c[i] ^= iv[i];
            GCM_MUL(ctx);
            iv += 16;
            len -= 16;
        }
        if (len) {
            for (i = 0; i < len; ++i)
                ctx->Xi.c[i] ^= iv[i];
            GCM_MUL(ctx);
        }
        len0 <<= 3;
        if (IS_LITTLE_ENDIAN) {
#ifdef BSWAP8
            ctx->Xi.u[1] ^= BSWAP8(len0);
#else
            ctx->Xi.c[8] ^= (u8)(len0 >> 56);
            ctx->Xi.c[9] ^= (u8)(len0 >> 48);
            ctx->Xi.c[10] ^= (u8)(len0 >> 40);
            ctx->Xi.c[11] ^= (u8)(len0 >> 32);
            ctx->Xi.c[12] ^= (u8)(len0 >> 24);
            ctx->Xi.c[13] ^= (u8)(len0 >> 16);
            ctx->Xi.c[14] ^= (u8)(len0 >> 8);
            ctx->Xi.c[15] ^= (u8)(len0);
#endif
        } else {
            ctx->Xi.u[1] ^= len0;
        }

        GCM_MUL(ctx);

        if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
            ctr = BSWAP4(ctx->Xi.d[3]);
#else
            ctr = GETU32(ctx->Xi.c + 12);
#endif
        else
            ctr = ctx->Xi.d[3];

        /* Copy borrowed Xi to Yi */
        ctx->Yi.u[0] = ctx->Xi.u[0];
        ctx->Yi.u[1] = ctx->Xi.u[1];
    }

    ctx->Xi.u[0] = 0;
    ctx->Xi.u[1] = 0;

    (*ctx->block) (ctx->Yi.c, ctx->EK0.c, ctx->key);
    ++ctr;
    if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
        ctx->Yi.d[3] = BSWAP4(ctr);
#else
        PUTU32(ctx->Yi.c + 12, ctr);
#endif
    else
        ctx->Yi.d[3] = ctr;
}
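
/*
 * The code above implements the J0 derivation of NIST SP 800-38D: for a
 * 96-bit IV the pre-counter block is simply IV || 0^31 || 1, while any
 * other IV length is compressed with GHASH, J0 = GHASH(IV padded with
 * zeros, followed by the 64-bit IV bit length). EK0 = E(K, J0) is
 * computed here and kept for the final tag, and the counter is then
 * incremented to yield Y1, the first block used for encryption.
 */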
int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad,
                      size_t len)
{
    size_t i;
    unsigned int n;
    u64 alen = ctx->len.u[0];
#ifdef GCM_FUNCREF_4BIT
    void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
# ifdef GHASH
    void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
                         const u8 *inp, size_t len) = ctx->ghash;
# endif
#endif

    if (ctx->len.u[1])
        return -2;

    alen += len;
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->len.u[0] = alen;

    n = ctx->ares;
    if (n) {
        while (n && len) {
            ctx->Xi.c[n] ^= *(aad++);
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0)
            GCM_MUL(ctx);
        else {
            ctx->ares = n;
            return 0;
        }
    }
#ifdef GHASH
    if ((i = (len & (size_t)-16))) {
        GHASH(ctx, aad, i);
        aad += i;
        len -= i;
    }
#else
    while (len >= 16) {
        for (i = 0; i < 16; ++i)
            ctx->Xi.c[i] ^= aad[i];
        GCM_MUL(ctx);
        aad += 16;
        len -= 16;
    }
#endif
    if (len) {
        n = (unsigned int)len;
        for (i = 0; i < len; ++i)
            ctx->Xi.c[i] ^= aad[i];
    }

    ctx->ares = n;
    return 0;
}
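
/*
 * The bounds checked above and in the encrypt/decrypt routines below
 * mirror the limits of NIST SP 800-38D: at most 2^64 bits of AAD (hence
 * the 2^61-byte check on alen) and at most 2^39 - 256 bits of plaintext,
 * i.e. 2^36 - 32 bytes (hence the (1 << 36) - 32 check on mlen).
 */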
int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    block128_f block = ctx->block;
    void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
    void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
    void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
                         const u8 *inp, size_t len) = ctx->ghash;
# endif
#endif

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to encrypt finalizes GHASH(AAD) */
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
#else
        GCM_MUL(ctx);
#endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
#else
        ctr = GETU32(ctx->Yi.c + 12);
#endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
    if (16 % sizeof(size_t) == 0) { /* always true actually */
        do {
            if (n) {
# if defined(GHASH)
                while (n && len) {
                    ctx->Xn[mres++] = *(out++) = *(in++) ^ ctx->EKi.c[n];
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GHASH(ctx, ctx->Xn, mres);
                    mres = 0;
                } else {
                    ctx->mres = mres;
                    return 0;
                }
# else
                while (n && len) {
                    ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GCM_MUL(ctx);
                    mres = 0;
                } else {
                    ctx->mres = n;
                    return 0;
                }
# endif
            }
# if defined(STRICT_ALIGNMENT)
            if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
                break;
# endif
# if defined(GHASH)
            if (len >= 16 && mres) {
                GHASH(ctx, ctx->Xn, mres);
                mres = 0;
            }
#  if defined(GHASH_CHUNK)
            while (len >= GHASH_CHUNK) {
                size_t j = GHASH_CHUNK;

                while (j) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#   else
                        PUTU32(ctx->Yi.c + 12, ctr);
#   endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    j -= 16;
                }
                GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
                len -= GHASH_CHUNK;
            }
#  endif
            if ((i = (len & (size_t)-16))) {
                size_t j = i;

                while (len >= 16) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                        PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    len -= 16;
                }
                GHASH(ctx, out - j, j);
            }
# else
            while (len >= 16) {
                size_t *out_t = (size_t *)out;
                const size_t *in_t = (const size_t *)in;

                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                    PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                else
                    ctx->Yi.d[3] = ctr;
                for (i = 0; i < 16 / sizeof(size_t); ++i)
                    ctx->Xi.t[i] ^= out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                GCM_MUL(ctx);
                out += 16;
                in += 16;
                len -= 16;
            }
# endif
            if (len) {
                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
# else
                    PUTU32(ctx->Yi.c + 12, ctr);
# endif
                else
                    ctx->Yi.d[3] = ctr;
# if defined(GHASH)
                while (len--) {
                    ctx->Xn[mres++] = out[n] = in[n] ^ ctx->EKi.c[n];
                    ++n;
                }
# else
                while (len--) {
                    ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
                    ++n;
                }
                mres = n;
# endif
            }

            ctx->mres = mres;
            return 0;
        } while (0);
    }
#endif
    for (i = 0; i < len; ++i) {
        if (n == 0) {
            (*block) (ctx->Yi.c, ctx->EKi.c, key);
            ++ctr;
            if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
                ctx->Yi.d[3] = BSWAP4(ctr);
#else
                PUTU32(ctx->Yi.c + 12, ctr);
#endif
            else
                ctx->Yi.d[3] = ctr;
        }
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        ctx->Xn[mres++] = out[i] = in[i] ^ ctx->EKi.c[n];
        n = (n + 1) % 16;
        if (mres == sizeof(ctx->Xn)) {
            GHASH(ctx, ctx->Xn, sizeof(ctx->Xn));
            mres = 0;
        }
#else
        ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
        mres = n = (n + 1) % 16;
        if (n == 0)
            GCM_MUL(ctx);
#endif
    }

    ctx->mres = mres;
    return 0;
}
int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    block128_f block = ctx->block;
    void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
    void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
    void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
                         const u8 *inp, size_t len) = ctx->ghash;
# endif
#endif

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to decrypt finalizes GHASH(AAD) */
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
#else
        GCM_MUL(ctx);
#endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
#else
        ctr = GETU32(ctx->Yi.c + 12);
#endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
    if (16 % sizeof(size_t) == 0) { /* always true actually */
        do {
            if (n) {
# if defined(GHASH)
                while (n && len) {
                    *(out++) = (ctx->Xn[mres++] = *(in++)) ^ ctx->EKi.c[n];
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GHASH(ctx, ctx->Xn, mres);
                    mres = 0;
                } else {
                    ctx->mres = mres;
                    return 0;
                }
# else
                while (n && len) {
                    u8 c = *(in++);

                    *(out++) = c ^ ctx->EKi.c[n];
                    ctx->Xi.c[n] ^= c;
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GCM_MUL(ctx);
                    mres = 0;
                } else {
                    ctx->mres = n;
                    return 0;
                }
# endif
            }
# if defined(STRICT_ALIGNMENT)
            if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
                break;
# endif
# if defined(GHASH)
            if (len >= 16 && mres) {
                GHASH(ctx, ctx->Xn, mres);
                mres = 0;
            }
#  if defined(GHASH_CHUNK)
            while (len >= GHASH_CHUNK) {
                size_t j = GHASH_CHUNK;

                GHASH(ctx, in, GHASH_CHUNK);
                while (j) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#   else
                        PUTU32(ctx->Yi.c + 12, ctr);
#   endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    j -= 16;
                }
                len -= GHASH_CHUNK;
            }
#  endif
            if ((i = (len & (size_t)-16))) {
                GHASH(ctx, in, i);
                while (len >= 16) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                        PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    len -= 16;
                }
            }
# else
            while (len >= 16) {
                size_t *out_t = (size_t *)out;
                const size_t *in_t = (const size_t *)in;

                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                    PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                else
                    ctx->Yi.d[3] = ctr;
                for (i = 0; i < 16 / sizeof(size_t); ++i) {
                    size_t c = in_t[i];

                    out_t[i] = c ^ ctx->EKi.t[i];
                    ctx->Xi.t[i] ^= c;
                }
                GCM_MUL(ctx);
                out += 16;
                in += 16;
                len -= 16;
            }
# endif
            if (len) {
                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
# else
                    PUTU32(ctx->Yi.c + 12, ctr);
# endif
                else
                    ctx->Yi.d[3] = ctr;
# if defined(GHASH)
                while (len--) {
                    out[n] = (ctx->Xn[mres++] = in[n]) ^ ctx->EKi.c[n];
                    ++n;
                }
# else
                while (len--) {
                    u8 c = in[n];

                    ctx->Xi.c[n] ^= c;
                    out[n] = c ^ ctx->EKi.c[n];
                    ++n;
                }
                mres = n;
# endif
            }

            ctx->mres = mres;
            return 0;
        } while (0);
    }
#endif
    for (i = 0; i < len; ++i) {
        u8 c;

        if (n == 0) {
            (*block) (ctx->Yi.c, ctx->EKi.c, key);
            ++ctr;
            if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
                ctx->Yi.d[3] = BSWAP4(ctr);
#else
                PUTU32(ctx->Yi.c + 12, ctr);
#endif
            else
                ctx->Yi.d[3] = ctr;
        }
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        out[i] = (ctx->Xn[mres++] = c = in[i]) ^ ctx->EKi.c[n];
        n = (n + 1) % 16;
        if (mres == sizeof(ctx->Xn)) {
            GHASH(ctx, ctx->Xn, sizeof(ctx->Xn));
            mres = 0;
        }
#else
        c = in[i];
        out[i] = c ^ ctx->EKi.c[n];
        ctx->Xi.c[n] ^= c;
        mres = n = (n + 1) % 16;
        if (n == 0)
            GCM_MUL(ctx);
#endif
    }

    ctx->mres = mres;
    return 0;
}
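
/*
 * Note the ordering difference between the two directions: encryption
 * hashes the ciphertext it has just produced (GHASH over 'out'), while
 * decryption hashes its input before decrypting it (GHASH over 'in'),
 * since GHASH is always computed over the ciphertext.
 */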
int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
                                const unsigned char *in, unsigned char *out,
                                size_t len, ctr128_f stream)
{
#if defined(OPENSSL_SMALL_FOOTPRINT)
    return CRYPTO_gcm128_encrypt(ctx, in, out, len);
#else
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    void *key = ctx->key;
# ifdef GCM_FUNCREF_4BIT
    void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
#  ifdef GHASH
    void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
                         const u8 *inp, size_t len) = ctx->ghash;
#  endif
# endif

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to encrypt finalizes GHASH(AAD) */
# if defined(GHASH)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
# else
        GCM_MUL(ctx);
# endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
# else
        ctr = GETU32(ctx->Yi.c + 12);
# endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
    if (n) {
# if defined(GHASH)
        while (n && len) {
            ctx->Xn[mres++] = *(out++) = *(in++) ^ ctx->EKi.c[n];
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GHASH(ctx, ctx->Xn, mres);
            mres = 0;
        } else {
            ctx->mres = mres;
            return 0;
        }
# else
        while (n && len) {
            ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GCM_MUL(ctx);
            mres = 0;
        } else {
            ctx->mres = n;
            return 0;
        }
# endif
    }
# if defined(GHASH)
    if (len >= 16 && mres) {
        GHASH(ctx, ctx->Xn, mres);
        mres = 0;
    }
#  if defined(GHASH_CHUNK)
    while (len >= GHASH_CHUNK) {
        (*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
        ctr += GHASH_CHUNK / 16;
        if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
#   else
            PUTU32(ctx->Yi.c + 12, ctr);
#   endif
        else
            ctx->Yi.d[3] = ctr;
        GHASH(ctx, out, GHASH_CHUNK);
        out += GHASH_CHUNK;
        in += GHASH_CHUNK;
        len -= GHASH_CHUNK;
    }
#  endif
# endif
    if ((i = (len & (size_t)-16))) {
        size_t j = i / 16;

        (*stream) (in, out, j, key, ctx->Yi.c);
        ctr += (unsigned int)j;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        in += i;
        len -= i;
# if defined(GHASH)
        GHASH(ctx, out, i);
        out += i;
# else
        while (j--) {
            for (i = 0; i < 16; ++i)
                ctx->Xi.c[i] ^= out[i];
            GCM_MUL(ctx);
            out += 16;
        }
# endif
    }
    if (len) {
        (*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        while (len--) {
# if defined(GHASH)
            ctx->Xn[mres++] = out[n] = in[n] ^ ctx->EKi.c[n];
# else
            ctx->Xi.c[mres++] ^= out[n] = in[n] ^ ctx->EKi.c[n];
# endif
            ++n;
        }
    }

    ctx->mres = mres;
    return 0;
#endif
}
int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
                                const unsigned char *in, unsigned char *out,
                                size_t len, ctr128_f stream)
{
#if defined(OPENSSL_SMALL_FOOTPRINT)
    return CRYPTO_gcm128_decrypt(ctx, in, out, len);
#else
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    void *key = ctx->key;
# ifdef GCM_FUNCREF_4BIT
    void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
#  ifdef GHASH
    void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
                         const u8 *inp, size_t len) = ctx->ghash;
#  endif
# endif

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to decrypt finalizes GHASH(AAD) */
# if defined(GHASH)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
# else
        GCM_MUL(ctx);
# endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
# else
        ctr = GETU32(ctx->Yi.c + 12);
# endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
    if (n) {
# if defined(GHASH)
        while (n && len) {
            *(out++) = (ctx->Xn[mres++] = *(in++)) ^ ctx->EKi.c[n];
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GHASH(ctx, ctx->Xn, mres);
            mres = 0;
        } else {
            ctx->mres = mres;
            return 0;
        }
# else
        while (n && len) {
            u8 c = *(in++);

            *(out++) = c ^ ctx->EKi.c[n];
            ctx->Xi.c[n] ^= c;
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GCM_MUL(ctx);
            mres = 0;
        } else {
            ctx->mres = n;
            return 0;
        }
# endif
    }
# if defined(GHASH)
    if (len >= 16 && mres) {
        GHASH(ctx, ctx->Xn, mres);
        mres = 0;
    }
#  if defined(GHASH_CHUNK)
    while (len >= GHASH_CHUNK) {
        GHASH(ctx, in, GHASH_CHUNK);
        (*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
        ctr += GHASH_CHUNK / 16;
        if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
#   else
            PUTU32(ctx->Yi.c + 12, ctr);
#   endif
        else
            ctx->Yi.d[3] = ctr;
        out += GHASH_CHUNK;
        in += GHASH_CHUNK;
        len -= GHASH_CHUNK;
    }
#  endif
# endif
    if ((i = (len & (size_t)-16))) {
        size_t j = i / 16;

# if defined(GHASH)
        GHASH(ctx, in, i);
# else
        while (j--) {
            size_t k;

            for (k = 0; k < 16; ++k)
                ctx->Xi.c[k] ^= in[k];
            GCM_MUL(ctx);
            in += 16;
        }
        j = i / 16;
        in -= i;
# endif
        (*stream) (in, out, j, key, ctx->Yi.c);
        ctr += (unsigned int)j;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        out += i;
        in += i;
        len -= i;
    }
    if (len) {
        (*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        while (len--) {
# if defined(GHASH)
            out[n] = (ctx->Xn[mres++] = in[n]) ^ ctx->EKi.c[n];
# else
            u8 c = in[n];

            ctx->Xi.c[mres++] ^= c;
            out[n] = c ^ ctx->EKi.c[n];
# endif
            ++n;
        }
    }

    ctx->mres = mres;
    return 0;
#endif
}
int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
                         size_t len)
{
    DECLARE_IS_ENDIAN;
    u64 alen = ctx->len.u[0] << 3;
    u64 clen = ctx->len.u[1] << 3;
#ifdef GCM_FUNCREF_4BIT
    void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
    void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
                         const u8 *inp, size_t len) = ctx->ghash;
# endif
#endif

#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
    u128 bitlen;
    unsigned int mres = ctx->mres;

    if (mres) {
        unsigned blocks = (mres + 15) & -16;

        memset(ctx->Xn + mres, 0, blocks - mres);
        mres = blocks;
        if (mres == sizeof(ctx->Xn)) {
            GHASH(ctx, ctx->Xn, mres);
            mres = 0;
        }
    } else if (ctx->ares) {
        GCM_MUL(ctx);
    }
#else
    if (ctx->mres || ctx->ares)
        GCM_MUL(ctx);
#endif

    if (IS_LITTLE_ENDIAN) {
#ifdef BSWAP8
        alen = BSWAP8(alen);
        clen = BSWAP8(clen);
#else
        u8 *p = ctx->len.c;

        ctx->len.u[0] = alen;
        ctx->len.u[1] = clen;

        alen = (u64)GETU32(p) << 32 | GETU32(p + 4);
        clen = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
#endif
    }

#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
    bitlen.hi = alen;
    bitlen.lo = clen;
    memcpy(ctx->Xn + mres, &bitlen, sizeof(bitlen));
    mres += sizeof(bitlen);
    GHASH(ctx, ctx->Xn, mres);
#else
    ctx->Xi.u[0] ^= alen;
    ctx->Xi.u[1] ^= clen;
    GCM_MUL(ctx);
#endif

    ctx->Xi.u[0] ^= ctx->EK0.u[0];
    ctx->Xi.u[1] ^= ctx->EK0.u[1];

    if (tag && len <= sizeof(ctx->Xi))
        return CRYPTO_memcmp(ctx->Xi.c, tag, len);
    else
        return -1;
}
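
/*
 * CRYPTO_gcm128_finish leaves the full authentication tag
 * T = GHASH(H, AAD, C) ^ E(K, J0) in ctx->Xi and compares it against the
 * caller's tag with CRYPTO_memcmp so that the comparison is
 * constant-time; CRYPTO_gcm128_tag below simply copies the tag out.
 */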
void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len)
{
    CRYPTO_gcm128_finish(ctx, NULL, 0);
    memcpy(tag, ctx->Xi.c,
           len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
}

GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block)
{
    GCM128_CONTEXT *ret;

    if ((ret = OPENSSL_malloc(sizeof(*ret))) != NULL)
        CRYPTO_gcm128_init(ret, key, block);

    return ret;
}

void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx)
{
    OPENSSL_clear_free(ctx, sizeof(*ctx));
}
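
/*-
 * Illustrative usage sketch (not part of this file; the helper name
 * gcm_seal is hypothetical): one-shot AES-128-GCM sealing through this
 * low-level API, with AES_encrypt from <openssl/aes.h> cast to block128_f
 * in the same way the EVP AES-GCM code does. Error handling is largely
 * elided for brevity.
 *
 *     #include <openssl/aes.h>
 *     #include <openssl/modes.h>
 *
 *     int gcm_seal(const unsigned char key[16],
 *                  const unsigned char *iv, size_t ivlen,
 *                  const unsigned char *aad, size_t aadlen,
 *                  const unsigned char *pt, unsigned char *ct, size_t len,
 *                  unsigned char tag[16])
 *     {
 *         AES_KEY aes;
 *         GCM128_CONTEXT *gcm;
 *
 *         AES_set_encrypt_key(key, 128, &aes);
 *         if ((gcm = CRYPTO_gcm128_new(&aes, (block128_f)AES_encrypt)) == NULL)
 *             return 0;
 *         CRYPTO_gcm128_setiv(gcm, iv, ivlen);
 *         if (CRYPTO_gcm128_aad(gcm, aad, aadlen) != 0
 *             || CRYPTO_gcm128_encrypt(gcm, pt, ct, len) != 0) {
 *             CRYPTO_gcm128_release(gcm);
 *             return 0;
 *         }
 *         CRYPTO_gcm128_tag(gcm, tag, 16);
 *         CRYPTO_gcm128_release(gcm);
 *         return 1;
 *     }
 */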