author     2014-03-14 22:15:51 +0000
committer  2014-03-14 22:15:51 +0000
commit     e32818a6380c2dbeba827b34a784262cda7659f9 (patch)
tree       4e241f78457f6cad8c45ba406f31e37e33c952a1
parent     alpha stable, bug #496628. (diff)
Fix build on 32bit systems #504616 by Toralf Förster.
(Portage version: 2.2.8-r1/cvs/Linux x86_64, signed Manifest commit with key D2E96200)
-rw-r--r--  net-misc/openssh/ChangeLog                                    |  6
-rw-r--r--  net-misc/openssh/files/openssh-6.5_p1-hpn-cipher-align.patch  | 31
2 files changed, 27 insertions, 10 deletions
diff --git a/net-misc/openssh/ChangeLog b/net-misc/openssh/ChangeLog
index 5282f4533820..ad8245979b47 100644
--- a/net-misc/openssh/ChangeLog
+++ b/net-misc/openssh/ChangeLog
@@ -1,6 +1,10 @@
 # ChangeLog for net-misc/openssh
 # Copyright 1999-2014 Gentoo Foundation; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo-x86/net-misc/openssh/ChangeLog,v 1.502 2014/03/14 12:12:13 vapier Exp $
+# $Header: /var/cvsroot/gentoo-x86/net-misc/openssh/ChangeLog,v 1.503 2014/03/14 22:15:51 vapier Exp $
+
+  14 Mar 2014; Mike Frysinger <vapier@gentoo.org>
+  files/openssh-6.5_p1-hpn-cipher-align.patch:
+  Fix build on 32bit systems #504616 by Toralf Förster.
 
 *openssh-6.5_p1-r1 (14 Mar 2014)
 
diff --git a/net-misc/openssh/files/openssh-6.5_p1-hpn-cipher-align.patch b/net-misc/openssh/files/openssh-6.5_p1-hpn-cipher-align.patch
index 6610f07c35b9..cfb060fdc5f4 100644
--- a/net-misc/openssh/files/openssh-6.5_p1-hpn-cipher-align.patch
+++ b/net-misc/openssh/files/openssh-6.5_p1-hpn-cipher-align.patch
@@ -4,20 +4,26 @@ make sure we do not use unaligned loads/stores as some arches really hate that.
 --- a/cipher-ctr-mt.c
 +++ b/cipher-ctr-mt.c
-@@ -58,8 +58,10 @@
+@@ -58,8 +58,16 @@
 /* Collect thread stats and print at cancellation when in debug mode */
 /* #define CIPHER_THREAD_STATS */
 
-/* Use single-byte XOR instead of 8-byte XOR */
-/* #define CIPHER_BYTE_XOR */
+/* Can the system do unaligned loads natively? */
-+#if defined(__x86_64__) || defined(__i386__)
++#if defined(__aarch64__) || \
++    defined(__i386__) || \
++    defined(__powerpc__) || \
++    defined(__x86_64__)
+# define CIPHER_UNALIGNED_OK
+#endif
++#if defined(__SIZEOF_INT128__)
++# define CIPHER_INT128_OK
++#endif
 
 /*-------------------- END TUNABLES --------------------*/
 
-@@ -285,8 +286,18 @@ thread_loop(void *x)
+@@ -285,8 +293,20 @@ thread_loop(void *x)
 
 static int
 ssh_aes_ctr(EVP_CIPHER_CTX *ctx, u_char *dest, const u_char *src,
@@ -25,7 +31,9 @@ make sure we do not use unaligned loads/stores as some arches really hate that.
+    size_t len)
 {
+ typedef union {
++#ifdef CIPHER_INT128_OK
+ __uint128_t *u128;
++#endif
+ uint64_t *u64;
+ uint32_t *u32;
+ uint8_t *u8;
 struct ssh_aes_ctr_ctx *c;
 struct kq *q, *oldq;
 int ridx;
-@@ -301,35 +312,38 @@ ssh_aes_ctr(EVP_CIPHER_CTX *ctx, u_char *dest, const u_char *src,
+@@ -301,35 +321,41 @@ ssh_aes_ctr(EVP_CIPHER_CTX *ctx, u_char *dest, const u_char *src,
 ridx = c->ridx;
 
 /* src already padded to block multiple */
- dest[13] = src[13] ^ buf[13];
- dest[14] = src[14] ^ buf[14];
- dest[15] = src[15] ^ buf[15];
-+#ifdef CIPHER_UNALIGNED_OK
-+ destp.u128[0] = srcp.u128[0] ^ bufp.u128[0];
 #else
- *(uint64_t *)dest = *(uint64_t *)src ^ *(uint64_t *)buf;
- *(uint64_t *)(dest + 8) = *(uint64_t *)(src + 8) ^
-     *(uint64_t *)(buf + 8);
-#endif
+ /* figure out the alignment on the fly */
++#ifdef CIPHER_UNALIGNED_OK
++ align = 0;
++#else
+ align = destp.u | srcp.u | bufp.u;
++#endif
+
++#ifdef CIPHER_INT128_OK
+ if ((align & 0xf) == 0) {
+ destp.u128[0] = srcp.u128[0] ^ bufp.u128[0];
-+ } else if ((align & 0x7) == 0) {
++ } else
++#endif
++ if ((align & 0x7) == 0) {
+ destp.u64[0] = srcp.u64[0] ^ bufp.u64[0];
+ destp.u64[1] = srcp.u64[1] ^ bufp.u64[1];
+ } else if ((align & 0x3) == 0) {
+ destp.u32[0] = srcp.u32[0] ^ bufp.u32[0];
+ destp.u32[1] = srcp.u32[1] ^ bufp.u32[1];
+ destp.u32[2] = srcp.u32[2] ^ bufp.u32[2];
+ destp.u32[3] = srcp.u32[3] ^ bufp.u32[3];
+ } else {
+ size_t i;
+ for (i = 0; i < AES_BLOCK_SIZE; ++i)
+ dest[i] = src[i] ^ buf[i];
+ }
- #endif
- dest += 16;
- src += 16;
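Background on the technique the patch uses: compile-time macros advertise whether the target handles unaligned loads (CIPHER_UNALIGNED_OK) and whether __uint128_t exists (CIPHER_INT128_OK), and the run-time alignment of the three pointers then decides whether a 16-byte block is XORed as one 128-bit word, two 64-bit words, four 32-bit words, or byte by byte. The standalone sketch below is illustrative only and is not code from cipher-ctr-mt.c; names such as xor_block16, BLOCK_LEN, UNALIGNED_OK, and INT128_OK are made up for the example, but the dispatch mirrors the one added by the commit.

/* Illustrative sketch of the alignment-dispatch XOR; not from cipher-ctr-mt.c. */
#include <stdint.h>
#include <stdio.h>

#define BLOCK_LEN 16	/* stands in for AES_BLOCK_SIZE */

/* Compile-time tunables, in the spirit of the patch. */
#if defined(__aarch64__) || defined(__i386__) || \
    defined(__powerpc__) || defined(__x86_64__)
# define UNALIGNED_OK
#endif
#if defined(__SIZEOF_INT128__)
# define INT128_OK
#endif

static void
xor_block16(uint8_t *dest, const uint8_t *src, const uint8_t *key)
{
	/* Same pointer-punning union idea as the patch's ptrs_t. */
	typedef union {
#ifdef INT128_OK
		__uint128_t *u128;
#endif
		uint64_t *u64;
		uint32_t *u32;
		uint8_t *u8;
		const uint8_t *cu8;
		uintptr_t u;
	} ptrs_t;
	ptrs_t d, s, k;
	uintptr_t align;
	int i;

	d.u8 = dest;
	s.cu8 = src;
	k.cu8 = key;

#ifdef UNALIGNED_OK
	align = 0;			/* arch copes with unaligned loads; take the widest path */
#else
	align = d.u | s.u | k.u;	/* worst-case alignment of the three pointers */
#endif

#ifdef INT128_OK
	if ((align & 0xf) == 0) {	/* 16-byte aligned: one 128-bit XOR */
		d.u128[0] = s.u128[0] ^ k.u128[0];
	} else
#endif
	if ((align & 0x7) == 0) {	/* 8-byte aligned: two 64-bit XORs */
		d.u64[0] = s.u64[0] ^ k.u64[0];
		d.u64[1] = s.u64[1] ^ k.u64[1];
	} else if ((align & 0x3) == 0) {	/* 4-byte aligned: four 32-bit XORs */
		for (i = 0; i < 4; i++)
			d.u32[i] = s.u32[i] ^ k.u32[i];
	} else {				/* no usable alignment: byte loop */
		for (i = 0; i < BLOCK_LEN; i++)
			dest[i] = src[i] ^ key[i];
	}
}

int
main(void)
{
	uint8_t src[BLOCK_LEN], key[BLOCK_LEN], out[BLOCK_LEN];
	int i;

	for (i = 0; i < BLOCK_LEN; i++) {
		src[i] = (uint8_t)i;
		key[i] = 0xa5;
	}
	xor_block16(out, src, key);
	for (i = 0; i < BLOCK_LEN; i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}

On a compiler without __SIZEOF_INT128__ (for example 32-bit x86), the u128 member and the 16-byte branch drop out entirely, which is the effect the CIPHER_INT128_OK guard in the patch achieves for bug #504616.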