Skip to content

Commit c578e29

Browse files
peffgitster
authored and committed
bswap.h: drop unaligned loads
Our put_be32() routine and its variants (get_be32(), put_be64(), etc) have two implementations: on some platforms we cast memory in place and use ntohl()/htonl(), which can cause unaligned memory access. And on others, we pick out the individual bytes using bitshifts. This introduces extra complexity, and sometimes causes compilers to generate warnings about type-punning. And it's not clear there's any performance advantage. This split goes back to 660231a (block-sha1: support for architectures with memory alignment restrictions, 2009-08-12). The unaligned versions were part of the original block-sha1 code in d7c208a (Add new optimized C 'block-sha1' routines, 2009-08-05), which says it is: Based on the mozilla SHA1 routine, but doing the input data accesses a word at a time and with 'htonl()' instead of loading bytes and shifting. Back then, Linus provided timings versus the mozilla code which showed a 27% improvement: https://lore.kernel.org/git/[email protected]/ However, the unaligned loads were either not the useful part of that speedup, or perhaps compilers and processors have changed since then. Here are times for computing the sha1 of 4GB of random data, with and without -DNO_UNALIGNED_LOADS (and BLK_SHA1=1, of course). This is with gcc 10, -O2, and the processor is a Core i9-9880H. [stock] Benchmark #1: t/helper/test-tool sha1 <foo.rand Time (mean ± σ): 6.638 s ± 0.081 s [User: 6.269 s, System: 0.368 s] Range (min … max): 6.550 s … 6.841 s 10 runs [-DNO_UNALIGNED_LOADS] Benchmark #1: t/helper/test-tool sha1 <foo.rand Time (mean ± σ): 6.418 s ± 0.015 s [User: 6.058 s, System: 0.360 s] Range (min … max): 6.394 s … 6.447 s 10 runs And here's the same test run on an AMD A8-7600, using gcc 8.
[stock] Benchmark #1: t/helper/test-tool sha1 <foo.rand Time (mean ± σ): 11.721 s ± 0.113 s [User: 10.761 s, System: 0.951 s] Range (min … max): 11.509 s … 11.861 s 10 runs [-DNO_UNALIGNED_LOADS] Benchmark #1: t/helper/test-tool sha1 <foo.rand Time (mean ± σ): 11.744 s ± 0.066 s [User: 10.807 s, System: 0.928 s] Range (min … max): 11.637 s … 11.863 s 10 runs So the unaligned loads don't seem to help much, and actually make things worse. It's possible there are platforms where they provide more benefit, but: - the non-x86 platforms for which we use this code are old and obscure (powerpc and s390). - the main caller that cares about performance is block-sha1. But these days it is rarely used anyway, in favor of sha1dc (which is already much slower, and nobody seems to have cared that much). Let's just drop unaligned versions entirely in the name of simplicity. Signed-off-by: Jeff King <[email protected]> Signed-off-by: Junio C Hamano <[email protected]>
1 parent 47ae905 commit c578e29

File tree

2 files changed

+0
-25
lines changed

2 files changed

+0
-25
lines changed

Makefile

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1214,7 +1214,6 @@ SANITIZERS := $(foreach flag,$(subst $(comma),$(space),$(SANITIZE)),$(flag))
12141214
BASIC_CFLAGS += -fsanitize=$(SANITIZE) -fno-sanitize-recover=$(SANITIZE)
12151215
BASIC_CFLAGS += -fno-omit-frame-pointer
12161216
ifneq ($(filter undefined,$(SANITIZERS)),)
1217-
BASIC_CFLAGS += -DNO_UNALIGNED_LOADS
12181217
BASIC_CFLAGS += -DSHA1DC_FORCE_ALIGNED_ACCESS
12191218
endif
12201219
ifneq ($(filter leak,$(SANITIZERS)),)

compat/bswap.h

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -145,28 +145,6 @@ static inline uint64_t git_bswap64(uint64_t x)
145145

146146
#endif
147147

148-
/*
149-
* Performance might be improved if the CPU architecture is OK with
150-
* unaligned 32-bit loads and a fast ntohl() is available.
151-
* Otherwise fall back to byte loads and shifts which is portable,
152-
* and is faster on architectures with memory alignment issues.
153-
*/
154-
155-
#if !defined(NO_UNALIGNED_LOADS) && ( \
156-
defined(__i386__) || defined(__x86_64__) || \
157-
defined(_M_IX86) || defined(_M_X64) || \
158-
defined(__ppc__) || defined(__ppc64__) || \
159-
defined(__powerpc__) || defined(__powerpc64__) || \
160-
defined(__s390__) || defined(__s390x__))
161-
162-
#define get_be16(p) ntohs(*(unsigned short *)(p))
163-
#define get_be32(p) ntohl(*(unsigned int *)(p))
164-
#define get_be64(p) ntohll(*(uint64_t *)(p))
165-
#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
166-
#define put_be64(p, v) do { *(uint64_t *)(p) = htonll(v); } while (0)
167-
168-
#else
169-
170148
static inline uint16_t get_be16(const void *ptr)
171149
{
172150
const unsigned char *p = ptr;
@@ -212,6 +190,4 @@ static inline void put_be64(void *ptr, uint64_t value)
212190
p[7] = value >> 0;
213191
}
214192

215-
#endif
216-
217193
#endif /* COMPAT_BSWAP_H */

0 commit comments

Comments
 (0)