From fa1b3d08421d263694d5fb3989d796a57d714f2a Mon Sep 17 00:00:00 2001 From: Ben Lindstrom Date: Sun, 10 Dec 2000 01:55:37 +0000 Subject: 20001210 - (bal) OpenBSD CVS updates - markus@cvs.openbsd.org 2000/12/09 13:41:51 [cipher.c cipher.h rijndael.c rijndael.h rijndael_boxes.h] undo rijndael changes - markus@cvs.openbsd.org 2000/12/09 13:48:31 [rijndael.c] fix byte order bug w/o introducing new implementation - markus@cvs.openbsd.org 2000/12/09 14:08:27 [sftp-server.c] "" -> "." for realpath; from vinschen@redhat.com - markus@cvs.openbsd.org 2000/12/09 14:06:54 [ssh-agent.c] extern int optind; from stevesk@sweden.hp.com --- rijndael.c | 689 +++++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 395 insertions(+), 294 deletions(-) (limited to 'rijndael.c') diff --git a/rijndael.c b/rijndael.c index 92a39762f..10c779b4c 100644 --- a/rijndael.c +++ b/rijndael.c @@ -1,311 +1,412 @@ -/* - * rijndael-alg-fst.c v2.4 April '2000 - * rijndael-alg-api.c v2.4 April '2000 - * - * Optimised ANSI C code - * - * authors: v1.0: Antoon Bosselaers - * v2.0: Vincent Rijmen, K.U.Leuven - * v2.3: Paulo Barreto - * v2.4: Vincent Rijmen, K.U.Leuven - * - * This code is placed in the public domain. - */ - -#include -#include -#include +/* $OpenBSD: rijndael.c,v 1.6 2000/12/09 13:48:31 markus Exp $ */ + +/* This is an independent implementation of the encryption algorithm: */ +/* */ +/* RIJNDAEL by Joan Daemen and Vincent Rijmen */ +/* */ +/* which is a candidate algorithm in the Advanced Encryption Standard */ +/* programme of the US National Institute of Standards and Technology. */ +/* */ +/* Copyright in this implementation is held by Dr B R Gladman but I */ +/* hereby give permission for its free direct or derivative use subject */ +/* to acknowledgment of its origin and compliance with any conditions */ +/* that the originators of the algorithm place on its exploitation. */ +/* */ +/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */ + +/* Timing data for Rijndael (rijndael.c) + +Algorithm: rijndael (rijndael.c) + +128 bit key: +Key Setup: 305/1389 cycles (encrypt/decrypt) +Encrypt: 374 cycles = 68.4 mbits/sec +Decrypt: 352 cycles = 72.7 mbits/sec +Mean: 363 cycles = 70.5 mbits/sec + +192 bit key: +Key Setup: 277/1595 cycles (encrypt/decrypt) +Encrypt: 439 cycles = 58.3 mbits/sec +Decrypt: 425 cycles = 60.2 mbits/sec +Mean: 432 cycles = 59.3 mbits/sec + +256 bit key: +Key Setup: 374/1960 cycles (encrypt/decrypt) +Encrypt: 502 cycles = 51.0 mbits/sec +Decrypt: 498 cycles = 51.4 mbits/sec +Mean: 500 cycles = 51.2 mbits/sec + +*/ #include "config.h" #include "rijndael.h" -#include "rijndael_boxes.h" -int -rijndael_keysched(u_int8_t k[RIJNDAEL_MAXKC][4], - u_int8_t W[RIJNDAEL_MAXROUNDS+1][4][4], int ROUNDS) +void gen_tabs __P((void)); + +/* 3. 
Basic macros for speeding up generic operations */ + +/* Circular rotate of 32 bit values */ + +#define rotr(x,n) (((x) >> ((int)(n))) | ((x) << (32 - (int)(n)))) +#define rotl(x,n) (((x) << ((int)(n))) | ((x) >> (32 - (int)(n)))) + +/* Invert byte order in a 32 bit variable */ + +#define bswap(x) ((rotl(x, 8) & 0x00ff00ff) | (rotr(x, 8) & 0xff00ff00)) + +/* Extract byte from a 32 bit quantity (little endian notation) */ + +#define byte(x,n) ((u1byte)((x) >> (8 * n))) + +#if BYTE_ORDER != LITTLE_ENDIAN +#define BYTE_SWAP +#endif + +#ifdef BYTE_SWAP +#define io_swap(x) bswap(x) +#else +#define io_swap(x) (x) +#endif + +#define LARGE_TABLES + +u1byte pow_tab[256]; +u1byte log_tab[256]; +u1byte sbx_tab[256]; +u1byte isb_tab[256]; +u4byte rco_tab[ 10]; +u4byte ft_tab[4][256]; +u4byte it_tab[4][256]; + +#ifdef LARGE_TABLES + u4byte fl_tab[4][256]; + u4byte il_tab[4][256]; +#endif + +u4byte tab_gen = 0; + +#define ff_mult(a,b) (a && b ? pow_tab[(log_tab[a] + log_tab[b]) % 255] : 0) + +#define f_rn(bo, bi, n, k) \ + bo[n] = ft_tab[0][byte(bi[n],0)] ^ \ + ft_tab[1][byte(bi[(n + 1) & 3],1)] ^ \ + ft_tab[2][byte(bi[(n + 2) & 3],2)] ^ \ + ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n) + +#define i_rn(bo, bi, n, k) \ + bo[n] = it_tab[0][byte(bi[n],0)] ^ \ + it_tab[1][byte(bi[(n + 3) & 3],1)] ^ \ + it_tab[2][byte(bi[(n + 2) & 3],2)] ^ \ + it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n) + +#ifdef LARGE_TABLES + +#define ls_box(x) \ + ( fl_tab[0][byte(x, 0)] ^ \ + fl_tab[1][byte(x, 1)] ^ \ + fl_tab[2][byte(x, 2)] ^ \ + fl_tab[3][byte(x, 3)] ) + +#define f_rl(bo, bi, n, k) \ + bo[n] = fl_tab[0][byte(bi[n],0)] ^ \ + fl_tab[1][byte(bi[(n + 1) & 3],1)] ^ \ + fl_tab[2][byte(bi[(n + 2) & 3],2)] ^ \ + fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n) + +#define i_rl(bo, bi, n, k) \ + bo[n] = il_tab[0][byte(bi[n],0)] ^ \ + il_tab[1][byte(bi[(n + 3) & 3],1)] ^ \ + il_tab[2][byte(bi[(n + 2) & 3],2)] ^ \ + il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n) + +#else + +#define ls_box(x) \ + ((u4byte)sbx_tab[byte(x, 0)] << 0) ^ \ + ((u4byte)sbx_tab[byte(x, 1)] << 8) ^ \ + ((u4byte)sbx_tab[byte(x, 2)] << 16) ^ \ + ((u4byte)sbx_tab[byte(x, 3)] << 24) + +#define f_rl(bo, bi, n, k) \ + bo[n] = (u4byte)sbx_tab[byte(bi[n],0)] ^ \ + rotl(((u4byte)sbx_tab[byte(bi[(n + 1) & 3],1)]), 8) ^ \ + rotl(((u4byte)sbx_tab[byte(bi[(n + 2) & 3],2)]), 16) ^ \ + rotl(((u4byte)sbx_tab[byte(bi[(n + 3) & 3],3)]), 24) ^ *(k + n) + +#define i_rl(bo, bi, n, k) \ + bo[n] = (u4byte)isb_tab[byte(bi[n],0)] ^ \ + rotl(((u4byte)isb_tab[byte(bi[(n + 3) & 3],1)]), 8) ^ \ + rotl(((u4byte)isb_tab[byte(bi[(n + 2) & 3],2)]), 16) ^ \ + rotl(((u4byte)isb_tab[byte(bi[(n + 1) & 3],3)]), 24) ^ *(k + n) + +#endif + +void +gen_tabs(void) { - /* Calculate the necessary round keys - * The number of calculations depends on keyBits and blockBits - */ - int j, r, t, rconpointer = 0; - u_int8_t tk[RIJNDAEL_MAXKC][4]; - int KC = ROUNDS - 6; - - for (j = KC-1; j >= 0; j--) { - *((u_int32_t*)tk[j]) = *((u_int32_t*)k[j]); + u4byte i, t; + u1byte p, q; + + /* log and power tables for GF(2**8) finite field with */ + /* 0x11b as modular polynomial - the simplest prmitive */ + /* root is 0x11, used here to generate the tables */ + + for(i = 0,p = 1; i < 256; ++i) { + pow_tab[i] = (u1byte)p; log_tab[p] = (u1byte)i; + + p = p ^ (p << 1) ^ (p & 0x80 ? 
0x01b : 0); } - r = 0; - t = 0; - /* copy values into round key array */ - for (j = 0; (j < KC) && (r < ROUNDS + 1); ) { - for (; (j < KC) && (t < 4); j++, t++) { - *((u_int32_t*)W[r][t]) = *((u_int32_t*)tk[j]); - } - if (t == 4) { - r++; - t = 0; - } + + log_tab[1] = 0; p = 1; + + for(i = 0; i < 10; ++i) { + rco_tab[i] = p; + + p = (p << 1) ^ (p & 0x80 ? 0x1b : 0); } - - while (r < ROUNDS + 1) { /* while not enough round key material calculated */ - /* calculate new values */ - tk[0][0] ^= S[tk[KC-1][1]]; - tk[0][1] ^= S[tk[KC-1][2]]; - tk[0][2] ^= S[tk[KC-1][3]]; - tk[0][3] ^= S[tk[KC-1][0]]; - tk[0][0] ^= rcon[rconpointer++]; - - if (KC != 8) { - for (j = 1; j < KC; j++) { - *((u_int32_t*)tk[j]) ^= *((u_int32_t*)tk[j-1]); - } - } else { - for (j = 1; j < KC/2; j++) { - *((u_int32_t*)tk[j]) ^= *((u_int32_t*)tk[j-1]); - } - tk[KC/2][0] ^= S[tk[KC/2 - 1][0]]; - tk[KC/2][1] ^= S[tk[KC/2 - 1][1]]; - tk[KC/2][2] ^= S[tk[KC/2 - 1][2]]; - tk[KC/2][3] ^= S[tk[KC/2 - 1][3]]; - for (j = KC/2 + 1; j < KC; j++) { - *((u_int32_t*)tk[j]) ^= *((u_int32_t*)tk[j-1]); - } - } - /* copy values into round key array */ - for (j = 0; (j < KC) && (r < ROUNDS + 1); ) { - for (; (j < KC) && (t < 4); j++, t++) { - *((u_int32_t*)W[r][t]) = *((u_int32_t*)tk[j]); - } - if (t == 4) { - r++; - t = 0; - } - } - } - return 0; -} -int -rijndael_key_enc_to_dec(u_int8_t W[RIJNDAEL_MAXROUNDS+1][4][4], int ROUNDS) -{ - int r; - u_int8_t *w; - - for (r = 1; r < ROUNDS; r++) { - w = W[r][0]; - *((u_int32_t*)w) = *((u_int32_t*)U1[w[0]]) - ^ *((u_int32_t*)U2[w[1]]) - ^ *((u_int32_t*)U3[w[2]]) - ^ *((u_int32_t*)U4[w[3]]); - - w = W[r][1]; - *((u_int32_t*)w) = *((u_int32_t*)U1[w[0]]) - ^ *((u_int32_t*)U2[w[1]]) - ^ *((u_int32_t*)U3[w[2]]) - ^ *((u_int32_t*)U4[w[3]]); - - w = W[r][2]; - *((u_int32_t*)w) = *((u_int32_t*)U1[w[0]]) - ^ *((u_int32_t*)U2[w[1]]) - ^ *((u_int32_t*)U3[w[2]]) - ^ *((u_int32_t*)U4[w[3]]); - - w = W[r][3]; - *((u_int32_t*)w) = *((u_int32_t*)U1[w[0]]) - ^ *((u_int32_t*)U2[w[1]]) - ^ *((u_int32_t*)U3[w[2]]) - ^ *((u_int32_t*)U4[w[3]]); + /* note that the affine byte transformation matrix in */ + /* rijndael specification is in big endian format with */ + /* bit 0 as the most significant bit. In the remainder */ + /* of the specification the bits are numbered from the */ + /* least significant end of a byte. */ + + for(i = 0; i < 256; ++i) { + p = (i ? pow_tab[255 - log_tab[i]] : 0); q = p; + q = (q >> 7) | (q << 1); p ^= q; + q = (q >> 7) | (q << 1); p ^= q; + q = (q >> 7) | (q << 1); p ^= q; + q = (q >> 7) | (q << 1); p ^= q ^ 0x63; + sbx_tab[i] = (u1byte)p; isb_tab[p] = (u1byte)i; } - return 0; -} - -/** - * Encrypt a single block. 
- */ -int -rijndael_encrypt(rijndael_key *key, u_int8_t a[16], u_int8_t b[16]) -{ - u_int8_t (*rk)[4][4] = key->keySched; - int ROUNDS = key->ROUNDS; - int r; - u_int8_t temp[4][4]; - - *((u_int32_t*)temp[0]) = *((u_int32_t*)(a )) ^ *((u_int32_t*)rk[0][0]); - *((u_int32_t*)temp[1]) = *((u_int32_t*)(a+ 4)) ^ *((u_int32_t*)rk[0][1]); - *((u_int32_t*)temp[2]) = *((u_int32_t*)(a+ 8)) ^ *((u_int32_t*)rk[0][2]); - *((u_int32_t*)temp[3]) = *((u_int32_t*)(a+12)) ^ *((u_int32_t*)rk[0][3]); - *((u_int32_t*)(b )) = *((u_int32_t*)T1[temp[0][0]]) - ^ *((u_int32_t*)T2[temp[1][1]]) - ^ *((u_int32_t*)T3[temp[2][2]]) - ^ *((u_int32_t*)T4[temp[3][3]]); - *((u_int32_t*)(b + 4)) = *((u_int32_t*)T1[temp[1][0]]) - ^ *((u_int32_t*)T2[temp[2][1]]) - ^ *((u_int32_t*)T3[temp[3][2]]) - ^ *((u_int32_t*)T4[temp[0][3]]); - *((u_int32_t*)(b + 8)) = *((u_int32_t*)T1[temp[2][0]]) - ^ *((u_int32_t*)T2[temp[3][1]]) - ^ *((u_int32_t*)T3[temp[0][2]]) - ^ *((u_int32_t*)T4[temp[1][3]]); - *((u_int32_t*)(b +12)) = *((u_int32_t*)T1[temp[3][0]]) - ^ *((u_int32_t*)T2[temp[0][1]]) - ^ *((u_int32_t*)T3[temp[1][2]]) - ^ *((u_int32_t*)T4[temp[2][3]]); - for (r = 1; r < ROUNDS-1; r++) { - *((u_int32_t*)temp[0]) = *((u_int32_t*)(b )) ^ *((u_int32_t*)rk[r][0]); - *((u_int32_t*)temp[1]) = *((u_int32_t*)(b+ 4)) ^ *((u_int32_t*)rk[r][1]); - *((u_int32_t*)temp[2]) = *((u_int32_t*)(b+ 8)) ^ *((u_int32_t*)rk[r][2]); - *((u_int32_t*)temp[3]) = *((u_int32_t*)(b+12)) ^ *((u_int32_t*)rk[r][3]); - - *((u_int32_t*)(b )) = *((u_int32_t*)T1[temp[0][0]]) - ^ *((u_int32_t*)T2[temp[1][1]]) - ^ *((u_int32_t*)T3[temp[2][2]]) - ^ *((u_int32_t*)T4[temp[3][3]]); - *((u_int32_t*)(b + 4)) = *((u_int32_t*)T1[temp[1][0]]) - ^ *((u_int32_t*)T2[temp[2][1]]) - ^ *((u_int32_t*)T3[temp[3][2]]) - ^ *((u_int32_t*)T4[temp[0][3]]); - *((u_int32_t*)(b + 8)) = *((u_int32_t*)T1[temp[2][0]]) - ^ *((u_int32_t*)T2[temp[3][1]]) - ^ *((u_int32_t*)T3[temp[0][2]]) - ^ *((u_int32_t*)T4[temp[1][3]]); - *((u_int32_t*)(b +12)) = *((u_int32_t*)T1[temp[3][0]]) - ^ *((u_int32_t*)T2[temp[0][1]]) - ^ *((u_int32_t*)T3[temp[1][2]]) - ^ *((u_int32_t*)T4[temp[2][3]]); + + for(i = 0; i < 256; ++i) { + p = sbx_tab[i]; + +#ifdef LARGE_TABLES + + t = p; fl_tab[0][i] = t; + fl_tab[1][i] = rotl(t, 8); + fl_tab[2][i] = rotl(t, 16); + fl_tab[3][i] = rotl(t, 24); +#endif + t = ((u4byte)ff_mult(2, p)) | + ((u4byte)p << 8) | + ((u4byte)p << 16) | + ((u4byte)ff_mult(3, p) << 24); + + ft_tab[0][i] = t; + ft_tab[1][i] = rotl(t, 8); + ft_tab[2][i] = rotl(t, 16); + ft_tab[3][i] = rotl(t, 24); + + p = isb_tab[i]; + +#ifdef LARGE_TABLES + + t = p; il_tab[0][i] = t; + il_tab[1][i] = rotl(t, 8); + il_tab[2][i] = rotl(t, 16); + il_tab[3][i] = rotl(t, 24); +#endif + t = ((u4byte)ff_mult(14, p)) | + ((u4byte)ff_mult( 9, p) << 8) | + ((u4byte)ff_mult(13, p) << 16) | + ((u4byte)ff_mult(11, p) << 24); + + it_tab[0][i] = t; + it_tab[1][i] = rotl(t, 8); + it_tab[2][i] = rotl(t, 16); + it_tab[3][i] = rotl(t, 24); } - /* last round is special */ - *((u_int32_t*)temp[0]) = *((u_int32_t*)(b )) ^ *((u_int32_t*)rk[ROUNDS-1][0]); - *((u_int32_t*)temp[1]) = *((u_int32_t*)(b+ 4)) ^ *((u_int32_t*)rk[ROUNDS-1][1]); - *((u_int32_t*)temp[2]) = *((u_int32_t*)(b+ 8)) ^ *((u_int32_t*)rk[ROUNDS-1][2]); - *((u_int32_t*)temp[3]) = *((u_int32_t*)(b+12)) ^ *((u_int32_t*)rk[ROUNDS-1][3]); - b[ 0] = T1[temp[0][0]][1]; - b[ 1] = T1[temp[1][1]][1]; - b[ 2] = T1[temp[2][2]][1]; - b[ 3] = T1[temp[3][3]][1]; - b[ 4] = T1[temp[1][0]][1]; - b[ 5] = T1[temp[2][1]][1]; - b[ 6] = T1[temp[3][2]][1]; - b[ 7] = T1[temp[0][3]][1]; - b[ 8] = T1[temp[2][0]][1]; 
- b[ 9] = T1[temp[3][1]][1]; - b[10] = T1[temp[0][2]][1]; - b[11] = T1[temp[1][3]][1]; - b[12] = T1[temp[3][0]][1]; - b[13] = T1[temp[0][1]][1]; - b[14] = T1[temp[1][2]][1]; - b[15] = T1[temp[2][3]][1]; - *((u_int32_t*)(b )) ^= *((u_int32_t*)rk[ROUNDS][0]); - *((u_int32_t*)(b+ 4)) ^= *((u_int32_t*)rk[ROUNDS][1]); - *((u_int32_t*)(b+ 8)) ^= *((u_int32_t*)rk[ROUNDS][2]); - *((u_int32_t*)(b+12)) ^= *((u_int32_t*)rk[ROUNDS][3]); - - return 0; + + tab_gen = 1; } -/** - * Decrypt a single block. - */ -int -rijndael_decrypt(rijndael_key *key, u_int8_t a[16], u_int8_t b[16]) -{ - u_int8_t (*rk)[4][4] = key->keySched; - int ROUNDS = key->ROUNDS; - int r; - u_int8_t temp[4][4]; +#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b) + +#define imix_col(y,x) \ + u = star_x(x); \ + v = star_x(u); \ + w = star_x(v); \ + t = w ^ (x); \ + (y) = u ^ v ^ w; \ + (y) ^= rotr(u ^ t, 8) ^ \ + rotr(v ^ t, 16) ^ \ + rotr(t,24) + +/* initialise the key schedule from the user supplied key */ + +#define loop4(i) \ +{ t = ls_box(rotr(t, 8)) ^ rco_tab[i]; \ + t ^= e_key[4 * i]; e_key[4 * i + 4] = t; \ + t ^= e_key[4 * i + 1]; e_key[4 * i + 5] = t; \ + t ^= e_key[4 * i + 2]; e_key[4 * i + 6] = t; \ + t ^= e_key[4 * i + 3]; e_key[4 * i + 7] = t; \ +} + +#define loop6(i) \ +{ t = ls_box(rotr(t, 8)) ^ rco_tab[i]; \ + t ^= e_key[6 * i]; e_key[6 * i + 6] = t; \ + t ^= e_key[6 * i + 1]; e_key[6 * i + 7] = t; \ + t ^= e_key[6 * i + 2]; e_key[6 * i + 8] = t; \ + t ^= e_key[6 * i + 3]; e_key[6 * i + 9] = t; \ + t ^= e_key[6 * i + 4]; e_key[6 * i + 10] = t; \ + t ^= e_key[6 * i + 5]; e_key[6 * i + 11] = t; \ +} + +#define loop8(i) \ +{ t = ls_box(rotr(t, 8)) ^ rco_tab[i]; \ + t ^= e_key[8 * i]; e_key[8 * i + 8] = t; \ + t ^= e_key[8 * i + 1]; e_key[8 * i + 9] = t; \ + t ^= e_key[8 * i + 2]; e_key[8 * i + 10] = t; \ + t ^= e_key[8 * i + 3]; e_key[8 * i + 11] = t; \ + t = e_key[8 * i + 4] ^ ls_box(t); \ + e_key[8 * i + 12] = t; \ + t ^= e_key[8 * i + 5]; e_key[8 * i + 13] = t; \ + t ^= e_key[8 * i + 6]; e_key[8 * i + 14] = t; \ + t ^= e_key[8 * i + 7]; e_key[8 * i + 15] = t; \ +} + +rijndael_ctx * +rijndael_set_key(rijndael_ctx *ctx, const u4byte *in_key, const u4byte key_len, + int encrypt) +{ + u4byte i, t, u, v, w; + u4byte *e_key = ctx->e_key; + u4byte *d_key = ctx->d_key; + + ctx->decrypt = !encrypt; + + if(!tab_gen) + gen_tabs(); + + ctx->k_len = (key_len + 31) / 32; + + e_key[0] = io_swap(in_key[0]); e_key[1] = io_swap(in_key[1]); + e_key[2] = io_swap(in_key[2]); e_key[3] = io_swap(in_key[3]); - *((u_int32_t*)temp[0]) = *((u_int32_t*)(a )) ^ *((u_int32_t*)rk[ROUNDS][0]); - *((u_int32_t*)temp[1]) = *((u_int32_t*)(a+ 4)) ^ *((u_int32_t*)rk[ROUNDS][1]); - *((u_int32_t*)temp[2]) = *((u_int32_t*)(a+ 8)) ^ *((u_int32_t*)rk[ROUNDS][2]); - *((u_int32_t*)temp[3]) = *((u_int32_t*)(a+12)) ^ *((u_int32_t*)rk[ROUNDS][3]); - - *((u_int32_t*)(b )) = *((u_int32_t*)T5[temp[0][0]]) - ^ *((u_int32_t*)T6[temp[3][1]]) - ^ *((u_int32_t*)T7[temp[2][2]]) - ^ *((u_int32_t*)T8[temp[1][3]]); - *((u_int32_t*)(b+ 4)) = *((u_int32_t*)T5[temp[1][0]]) - ^ *((u_int32_t*)T6[temp[0][1]]) - ^ *((u_int32_t*)T7[temp[3][2]]) - ^ *((u_int32_t*)T8[temp[2][3]]); - *((u_int32_t*)(b+ 8)) = *((u_int32_t*)T5[temp[2][0]]) - ^ *((u_int32_t*)T6[temp[1][1]]) - ^ *((u_int32_t*)T7[temp[0][2]]) - ^ *((u_int32_t*)T8[temp[3][3]]); - *((u_int32_t*)(b+12)) = *((u_int32_t*)T5[temp[3][0]]) - ^ *((u_int32_t*)T6[temp[2][1]]) - ^ *((u_int32_t*)T7[temp[1][2]]) - ^ *((u_int32_t*)T8[temp[0][3]]); - for (r = ROUNDS-1; r > 1; r--) { - *((u_int32_t*)temp[0]) = 
*((u_int32_t*)(b )) ^ *((u_int32_t*)rk[r][0]); - *((u_int32_t*)temp[1]) = *((u_int32_t*)(b+ 4)) ^ *((u_int32_t*)rk[r][1]); - *((u_int32_t*)temp[2]) = *((u_int32_t*)(b+ 8)) ^ *((u_int32_t*)rk[r][2]); - *((u_int32_t*)temp[3]) = *((u_int32_t*)(b+12)) ^ *((u_int32_t*)rk[r][3]); - *((u_int32_t*)(b )) = *((u_int32_t*)T5[temp[0][0]]) - ^ *((u_int32_t*)T6[temp[3][1]]) - ^ *((u_int32_t*)T7[temp[2][2]]) - ^ *((u_int32_t*)T8[temp[1][3]]); - *((u_int32_t*)(b+ 4)) = *((u_int32_t*)T5[temp[1][0]]) - ^ *((u_int32_t*)T6[temp[0][1]]) - ^ *((u_int32_t*)T7[temp[3][2]]) - ^ *((u_int32_t*)T8[temp[2][3]]); - *((u_int32_t*)(b+ 8)) = *((u_int32_t*)T5[temp[2][0]]) - ^ *((u_int32_t*)T6[temp[1][1]]) - ^ *((u_int32_t*)T7[temp[0][2]]) - ^ *((u_int32_t*)T8[temp[3][3]]); - *((u_int32_t*)(b+12)) = *((u_int32_t*)T5[temp[3][0]]) - ^ *((u_int32_t*)T6[temp[2][1]]) - ^ *((u_int32_t*)T7[temp[1][2]]) - ^ *((u_int32_t*)T8[temp[0][3]]); + switch(ctx->k_len) { + case 4: t = e_key[3]; + for(i = 0; i < 10; ++i) + loop4(i); + break; + + case 6: e_key[4] = io_swap(in_key[4]); t = e_key[5] = io_swap(in_key[5]); + for(i = 0; i < 8; ++i) + loop6(i); + break; + + case 8: e_key[4] = io_swap(in_key[4]); e_key[5] = io_swap(in_key[5]); + e_key[6] = io_swap(in_key[6]); t = e_key[7] = io_swap(in_key[7]); + for(i = 0; i < 7; ++i) + loop8(i); + break; + } + + if (!encrypt) { + d_key[0] = e_key[0]; d_key[1] = e_key[1]; + d_key[2] = e_key[2]; d_key[3] = e_key[3]; + + for(i = 4; i < 4 * ctx->k_len + 24; ++i) { + imix_col(d_key[i], e_key[i]); + } } - /* last round is special */ - *((u_int32_t*)temp[0]) = *((u_int32_t*)(b )) ^ *((u_int32_t*)rk[1][0]); - *((u_int32_t*)temp[1]) = *((u_int32_t*)(b+ 4)) ^ *((u_int32_t*)rk[1][1]); - *((u_int32_t*)temp[2]) = *((u_int32_t*)(b+ 8)) ^ *((u_int32_t*)rk[1][2]); - *((u_int32_t*)temp[3]) = *((u_int32_t*)(b+12)) ^ *((u_int32_t*)rk[1][3]); - b[ 0] = S5[temp[0][0]]; - b[ 1] = S5[temp[3][1]]; - b[ 2] = S5[temp[2][2]]; - b[ 3] = S5[temp[1][3]]; - b[ 4] = S5[temp[1][0]]; - b[ 5] = S5[temp[0][1]]; - b[ 6] = S5[temp[3][2]]; - b[ 7] = S5[temp[2][3]]; - b[ 8] = S5[temp[2][0]]; - b[ 9] = S5[temp[1][1]]; - b[10] = S5[temp[0][2]]; - b[11] = S5[temp[3][3]]; - b[12] = S5[temp[3][0]]; - b[13] = S5[temp[2][1]]; - b[14] = S5[temp[1][2]]; - b[15] = S5[temp[0][3]]; - *((u_int32_t*)(b )) ^= *((u_int32_t*)rk[0][0]); - *((u_int32_t*)(b+ 4)) ^= *((u_int32_t*)rk[0][1]); - *((u_int32_t*)(b+ 8)) ^= *((u_int32_t*)rk[0][2]); - *((u_int32_t*)(b+12)) ^= *((u_int32_t*)rk[0][3]); - - return 0; + + return ctx; } -int -rijndael_makekey(rijndael_key *key, int direction, int keyLen, u_int8_t *keyMaterial) -{ - u_int8_t k[RIJNDAEL_MAXKC][4]; - int i; - - if (key == NULL) - return -1; - if ((direction != RIJNDAEL_ENCRYPT) && (direction != RIJNDAEL_DECRYPT)) - return -1; - if ((keyLen != 128) && (keyLen != 192) && (keyLen != 256)) - return -1; - - key->ROUNDS = keyLen/32 + 6; - - /* initialize key schedule: */ - for (i = 0; i < keyLen/8; i++) - k[i >> 2][i & 3] = (u_int8_t)keyMaterial[i]; - - rijndael_keysched(k, key->keySched, key->ROUNDS); - if (direction == RIJNDAEL_DECRYPT) - rijndael_key_enc_to_dec(key->keySched, key->ROUNDS); - return 0; +/* encrypt a block of text */ + +#define f_nround(bo, bi, k) \ + f_rn(bo, bi, 0, k); \ + f_rn(bo, bi, 1, k); \ + f_rn(bo, bi, 2, k); \ + f_rn(bo, bi, 3, k); \ + k += 4 + +#define f_lround(bo, bi, k) \ + f_rl(bo, bi, 0, k); \ + f_rl(bo, bi, 1, k); \ + f_rl(bo, bi, 2, k); \ + f_rl(bo, bi, 3, k) + +void +rijndael_encrypt(rijndael_ctx *ctx, const u4byte *in_blk, u4byte *out_blk) +{ + u4byte k_len = ctx->k_len; + 
u4byte *e_key = ctx->e_key; + u4byte b0[4], b1[4], *kp; + + b0[0] = io_swap(in_blk[0]) ^ e_key[0]; + b0[1] = io_swap(in_blk[1]) ^ e_key[1]; + b0[2] = io_swap(in_blk[2]) ^ e_key[2]; + b0[3] = io_swap(in_blk[3]) ^ e_key[3]; + + kp = e_key + 4; + + if(k_len > 6) { + f_nround(b1, b0, kp); f_nround(b0, b1, kp); + } + + if(k_len > 4) { + f_nround(b1, b0, kp); f_nround(b0, b1, kp); + } + + f_nround(b1, b0, kp); f_nround(b0, b1, kp); + f_nround(b1, b0, kp); f_nround(b0, b1, kp); + f_nround(b1, b0, kp); f_nround(b0, b1, kp); + f_nround(b1, b0, kp); f_nround(b0, b1, kp); + f_nround(b1, b0, kp); f_lround(b0, b1, kp); + + out_blk[0] = io_swap(b0[0]); out_blk[1] = io_swap(b0[1]); + out_blk[2] = io_swap(b0[2]); out_blk[3] = io_swap(b0[3]); +} + +/* decrypt a block of text */ + +#define i_nround(bo, bi, k) \ + i_rn(bo, bi, 0, k); \ + i_rn(bo, bi, 1, k); \ + i_rn(bo, bi, 2, k); \ + i_rn(bo, bi, 3, k); \ + k -= 4 + +#define i_lround(bo, bi, k) \ + i_rl(bo, bi, 0, k); \ + i_rl(bo, bi, 1, k); \ + i_rl(bo, bi, 2, k); \ + i_rl(bo, bi, 3, k) + +void +rijndael_decrypt(rijndael_ctx *ctx, const u4byte *in_blk, u4byte *out_blk) +{ + u4byte b0[4], b1[4], *kp; + u4byte k_len = ctx->k_len; + u4byte *e_key = ctx->e_key; + u4byte *d_key = ctx->d_key; + + b0[0] = io_swap(in_blk[0]) ^ e_key[4 * k_len + 24]; + b0[1] = io_swap(in_blk[1]) ^ e_key[4 * k_len + 25]; + b0[2] = io_swap(in_blk[2]) ^ e_key[4 * k_len + 26]; + b0[3] = io_swap(in_blk[3]) ^ e_key[4 * k_len + 27]; + + kp = d_key + 4 * (k_len + 5); + + if(k_len > 6) { + i_nround(b1, b0, kp); i_nround(b0, b1, kp); + } + + if(k_len > 4) { + i_nround(b1, b0, kp); i_nround(b0, b1, kp); + } + + i_nround(b1, b0, kp); i_nround(b0, b1, kp); + i_nround(b1, b0, kp); i_nround(b0, b1, kp); + i_nround(b1, b0, kp); i_nround(b0, b1, kp); + i_nround(b1, b0, kp); i_nround(b0, b1, kp); + i_nround(b1, b0, kp); i_lround(b0, b1, kp); + + out_blk[0] = io_swap(b0[0]); out_blk[1] = io_swap(b0[1]); + out_blk[2] = io_swap(b0[2]); out_blk[3] = io_swap(b0[3]); } -- cgit v1.2.3
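
For reference, a minimal usage sketch of the Gladman-style interface that this patch restores in rijndael.c. It is not part of the commit: it assumes the rijndael_ctx, u4byte, and function prototypes declared in rijndael.h match the definitions shown above, and the key/plaintext words are arbitrary placeholder values. Key lengths are given in bits (128, 192 or 256), and a 16-byte block is handled as four 32-bit words.

#include "rijndael.h"

int
main(void)
{
	rijndael_ctx enc, dec;
	/* arbitrary 128-bit key and plaintext, as four 32-bit words */
	u4byte key[4] = { 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f };
	u4byte pt[4]  = { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff };
	u4byte ct[4], out[4];

	/*
	 * key_len is in bits; one context is set up per direction, since
	 * the decryption key schedule is only built when encrypt == 0.
	 */
	rijndael_set_key(&enc, key, 128, 1);
	rijndael_set_key(&dec, key, 128, 0);

	rijndael_encrypt(&enc, pt, ct);		/* one 16-byte block */
	rijndael_decrypt(&dec, ct, out);	/* out should equal pt */

	return 0;
}

The io_swap() macro in the code above makes the word-oriented interface byte-order independent, which is the point of the "fix byte order bug w/o introducing new implementation" change noted in the commit message.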