diff --git a/src/Makefile.in b/src/Makefile.in index 4997677e..abbb076e 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -79,7 +79,7 @@ LIBOBJDIR= LIBOBJS=@LIBOBJS@ COMPAT_OBJ=$(LIBOBJS:.o=.lo) -UTIL_OBJ=rbtree.lo val_secalgo.lo +UTIL_OBJ=rbtree.lo val_secalgo.lo lruhash.lo lookup3.lo locks.lo JSMN_OBJ=jsmn.lo @@ -341,9 +341,18 @@ inet_ntop.lo inet_ntop.o: $(srcdir)/compat/inet_ntop.c config.h inet_pton.lo inet_pton.o: $(srcdir)/compat/inet_pton.c config.h sha512.lo sha512.o: $(srcdir)/compat/sha512.c config.h strlcpy.lo strlcpy.o: $(srcdir)/compat/strlcpy.c config.h +locks.lo locks.o: $(srcdir)/util/locks.c config.h $(srcdir)/util/locks.h $(srcdir)/util/orig-headers/locks.h \ + $(srcdir)/util/auxiliary/$(srcdir)/util/log.h $(srcdir)/debug.h config.h +lookup3.lo lookup3.o: $(srcdir)/util/lookup3.c config.h $(srcdir)/util/auxiliary/$(srcdir)/util/storage/lookup3.h \ + $(srcdir)/util/lookup3.h $(srcdir)/util/orig-headers/lookup3.h +lruhash.lo lruhash.o: $(srcdir)/util/lruhash.c config.h $(srcdir)/util/auxiliary/$(srcdir)/util/storage/lruhash.h \ + $(srcdir)/util/lruhash.h $(srcdir)/util/orig-headers/lruhash.h $(srcdir)/util/locks.h \ + $(srcdir)/util/orig-headers/locks.h $(srcdir)/util/auxiliary/$(srcdir)/util/log.h $(srcdir)/debug.h config.h \ + $(srcdir)/util/auxiliary/$(srcdir)/util/fptr_wlist.h rbtree.lo rbtree.o: $(srcdir)/util/rbtree.c config.h $(srcdir)/util/auxiliary/log.h \ $(srcdir)/util/auxiliary/$(srcdir)/util/log.h $(srcdir)/debug.h config.h $(srcdir)/util/auxiliary/fptr_wlist.h \ - $(srcdir)/util/rbtree.h $(srcdir)/util/orig-headers/rbtree.h + $(srcdir)/util/auxiliary/$(srcdir)/util/fptr_wlist.h $(srcdir)/util/rbtree.h \ + $(srcdir)/util/orig-headers/rbtree.h val_secalgo.lo val_secalgo.o: $(srcdir)/util/val_secalgo.c config.h \ $(srcdir)/util/auxiliary/$(srcdir)/util/data/packed_rrset.h \ $(srcdir)/util/auxiliary/validator/val_secalgo.h $(srcdir)/util/val_secalgo.h \ diff --git a/src/util/auxiliary/fptr_wlist.h b/src/util/auxiliary/fptr_wlist.h index d98741df..1eb5a74d 100644 --- a/src/util/auxiliary/fptr_wlist.h +++ b/src/util/auxiliary/fptr_wlist.h @@ -1,42 +1 @@ -/** - * - * /brief dummy prototypes for function pointer whitelisting - * - */ - -/* - * Copyright (c) 2013, NLnet Labs, Verisign, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the names of the copyright holders nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL Verisign, Inc. 
BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef UTIL_FPTR_WLIST_H -#define UTIL_FPTR_WLIST_H - -#define fptr_ok(x) -#define fptr_whitelist_event(x) -#define fptr_whitelist_rbtree_cmp(x) - -#endif /* UTIL_FPTR_WLIST_H */ - +#include "util/fptr_wlist.h" diff --git a/src/util/auxiliary/util/fptr_wlist.h b/src/util/auxiliary/util/fptr_wlist.h new file mode 100644 index 00000000..d98741df --- /dev/null +++ b/src/util/auxiliary/util/fptr_wlist.h @@ -0,0 +1,42 @@ +/** + * + * /brief dummy prototypes for function pointer whitelisting + * + */ + +/* + * Copyright (c) 2013, NLnet Labs, Verisign, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the names of the copyright holders nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Verisign, Inc. BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UTIL_FPTR_WLIST_H +#define UTIL_FPTR_WLIST_H + +#define fptr_ok(x) +#define fptr_whitelist_event(x) +#define fptr_whitelist_rbtree_cmp(x) + +#endif /* UTIL_FPTR_WLIST_H */ + diff --git a/src/util/auxiliary/util/log.h b/src/util/auxiliary/util/log.h index 30c9ef8f..cdd949b2 100644 --- a/src/util/auxiliary/util/log.h +++ b/src/util/auxiliary/util/log.h @@ -37,15 +37,22 @@ #include "config.h" #include "debug.h" -#if defined(SEC_DEBUG) && SEC_DEBUG +#ifdef DEBUGGING #define verbose(x, ...) DEBUG_NL(__VA_ARGS__) -#define log_err(...) DEBUG_NL(__VA_ARGS__) +#define log_err(...) DEBUG_NL(__VA_ARGS__) +#define log_info(...) DEBUG_NL(__VA_ARGS__) +#define fatal_exit(...) do { DEBUG_NL(__VA_ARGS__); exit(EXIT_FAILURE); } while(0) +#define log_assert(x) do { if(!(x)) fatal_exit("%s:%d: %s: assertion %s failed", \ + __FILE__, __LINE__, __FUNC__, #x); \ + } while(0) #else -#define verbose(...) -#define log_err(...) +#define verbose(...) ((void)0) +#define log_err(...) 
((void)0) +#define log_info(...) ((void)0) +#define fatal_exit(...) ((void)0) +#define log_assert(x) ((void)0) #endif -#define log_assert(x) #endif /* UTIL_LOG_H */ diff --git a/src/util/auxiliary/util/storage/lookup3.h b/src/util/auxiliary/util/storage/lookup3.h new file mode 100644 index 00000000..d00a0b5c --- /dev/null +++ b/src/util/auxiliary/util/storage/lookup3.h @@ -0,0 +1 @@ +#include "util/lookup3.h" diff --git a/src/util/auxiliary/util/storage/lruhash.h b/src/util/auxiliary/util/storage/lruhash.h new file mode 100644 index 00000000..cb2d18a1 --- /dev/null +++ b/src/util/auxiliary/util/storage/lruhash.h @@ -0,0 +1 @@ +#include "util/lruhash.h" diff --git a/src/util/import.sh b/src/util/import.sh index e9626988..49050268 100755 --- a/src/util/import.sh +++ b/src/util/import.sh @@ -6,3 +6,9 @@ wget -O rbtree.c ${REPO}/util/rbtree.c wget -O orig-headers/rbtree.h ${REPO}/util/rbtree.h wget -O val_secalgo.c ${REPO}/validator/val_secalgo.c wget -O orig-headers/val_secalgo.h ${REPO}/validator/val_secalgo.h +wget -O lruhash.c ${REPO}/util/storage/lruhash.c +wget -O orig-headers/lruhash.h ${REPO}/util/storage/lruhash.h +wget -O lookup3.c ${REPO}/util/storage/lookup3.c +wget -O orig-headers/lookup3.h ${REPO}/util/storage/lookup3.h +wget -O locks.c ${REPO}/util/locks.c +wget -O orig-headers/locks.h ${REPO}/util/locks.h diff --git a/src/util/locks.c b/src/util/locks.c new file mode 100644 index 00000000..b65a02bd --- /dev/null +++ b/src/util/locks.c @@ -0,0 +1,264 @@ +/** + * util/locks.c - unbound locking primitives + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * \file + * Implementation of locking and threading support. + * A place for locking debug code since most locking functions are macros. + */ + +#include "config.h" +#include "util/locks.h" +#include +#ifdef HAVE_SYS_WAIT_H +#include +#endif + +/** block all signals, masks them away. 
*/ +void +ub_thread_blocksigs(void) +{ +#if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS) || defined(HAVE_SIGPROCMASK) +# if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS) + int err; +# endif + sigset_t sigset; + sigfillset(&sigset); +#ifdef HAVE_PTHREAD + if((err=pthread_sigmask(SIG_SETMASK, &sigset, NULL))) + fatal_exit("pthread_sigmask: %s", strerror(err)); +#else +# ifdef HAVE_SOLARIS_THREADS + if((err=thr_sigsetmask(SIG_SETMASK, &sigset, NULL))) + fatal_exit("thr_sigsetmask: %s", strerror(err)); +# else + /* have nothing, do single process signal mask */ + if(sigprocmask(SIG_SETMASK, &sigset, NULL)) + fatal_exit("sigprocmask: %s", strerror(errno)); +# endif /* HAVE_SOLARIS_THREADS */ +#endif /* HAVE_PTHREAD */ +#endif /* have signal stuff */ +} + +/** unblock one signal, so we can catch it */ +void ub_thread_sig_unblock(int sig) +{ +#if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS) || defined(HAVE_SIGPROCMASK) +# if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS) + int err; +# endif + sigset_t sigset; + sigemptyset(&sigset); + sigaddset(&sigset, sig); +#ifdef HAVE_PTHREAD + if((err=pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))) + fatal_exit("pthread_sigmask: %s", strerror(err)); +#else +# ifdef HAVE_SOLARIS_THREADS + if((err=thr_sigsetmask(SIG_UNBLOCK, &sigset, NULL))) + fatal_exit("thr_sigsetmask: %s", strerror(err)); +# else + /* have nothing, do single thread case */ + if(sigprocmask(SIG_UNBLOCK, &sigset, NULL)) + fatal_exit("sigprocmask: %s", strerror(errno)); +# endif /* HAVE_SOLARIS_THREADS */ +#endif /* HAVE_PTHREAD */ +#else + (void)sig; +#endif /* have signal stuff */ +} + +#if !defined(HAVE_PTHREAD) && !defined(HAVE_SOLARIS_THREADS) && !defined(HAVE_WINDOWS_THREADS) +/** + * No threading available: fork a new process. + * This means no shared data structure, and no locking. + * Only the main thread ever returns. Exits on errors. + * @param thr: the location where to store the thread-id. + * @param func: function body of the thread. Return value of func is lost. + * @param arg: user argument to func. + */ +void +ub_thr_fork_create(ub_thread_type* thr, void* (*func)(void*), void* arg) +{ + pid_t pid = fork(); + switch(pid) { + default: /* main */ + *thr = (ub_thread_type)pid; + return; + case 0: /* child */ + *thr = (ub_thread_type)getpid(); + (void)(*func)(arg); + exit(0); + case -1: /* error */ + fatal_exit("could not fork: %s", strerror(errno)); + } +} + +/** + * There is no threading. Wait for a process to terminate. + * Note that ub_thread_type is defined as pid_t. + * @param thread: the process id to wait for. 
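A minimal caller sketch for this no-threads fallback (hypothetical helper names, not part of this patch): ub_thr_fork_create() runs func(arg) in a forked child, so the "thread" shares no memory with the parent, and ub_thr_fork_wait() reaps it with waitpid().

	static void* worker(void* arg)
	{
		(void)arg;	/* runs in the child process; writes are not shared back */
		return NULL;
	}

	static void run_worker_once(void)
	{
		ub_thread_type t;
		ub_thr_fork_create(&t, worker, NULL);	/* only the parent returns here */
		ub_thr_fork_wait(t);			/* waitpid() on the child pid */
	}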
+ */ +void ub_thr_fork_wait(ub_thread_type thread) +{ + int status = 0; + if(waitpid((pid_t)thread, &status, 0) == -1) + log_err("waitpid(%d): %s", (int)thread, strerror(errno)); + if(status != 0) + log_warn("process %d abnormal exit with status %d", + (int)thread, status); +} +#endif /* !defined(HAVE_PTHREAD) && !defined(HAVE_SOLARIS_THREADS) && !defined(HAVE_WINDOWS_THREADS) */ + +#ifdef HAVE_SOLARIS_THREADS +void* ub_thread_key_get(ub_thread_key_type key) +{ + void* ret=NULL; + LOCKRET(thr_getspecific(key, &ret)); + return ret; +} +#endif + +#ifdef HAVE_WINDOWS_THREADS +/** log a windows GetLastError message */ +static void log_win_err(const char* str, DWORD err) +{ + LPTSTR buf; + if(FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, err, 0, (LPTSTR)&buf, 0, NULL) == 0) { + /* could not format error message */ + log_err("%s, GetLastError=%d", str, (int)err); + return; + } + log_err("%s, (err=%d): %s", str, (int)err, buf); + LocalFree(buf); +} + +void lock_basic_init(lock_basic_type* lock) +{ + /* implement own lock, because windows HANDLE as Mutex usage + * uses too many handles and would bog down the whole system. */ + (void)InterlockedExchange(lock, 0); +} + +void lock_basic_destroy(lock_basic_type* lock) +{ + (void)InterlockedExchange(lock, 0); +} + +void lock_basic_lock(lock_basic_type* lock) +{ + LONG wait = 1; /* wait 1 msec at first */ + + while(InterlockedExchange(lock, 1)) { + /* if the old value was 1 then if was already locked */ + Sleep(wait); /* wait with sleep */ + wait *= 2; /* exponential backoff for waiting */ + } + /* the old value was 0, but we inserted 1, we locked it! */ +} + +void lock_basic_unlock(lock_basic_type* lock) +{ + /* unlock it by inserting the value of 0. xchg for cache coherency. 
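The Windows lock here is a plain test-and-set spinlock built on InterlockedExchange(), with Sleep()-based exponential backoff while contended. A rough portable analogue of the same idea using C11 atomics, purely illustrative and not part of getdns or unbound:

	#include <stdatomic.h>

	typedef atomic_int demo_lock_t;	/* 0 = free, 1 = held */

	static void demo_lock(demo_lock_t* l)
	{
		while(atomic_exchange(l, 1))
			;	/* old value 1: already locked; the Windows code sleeps here */
	}

	static void demo_unlock(demo_lock_t* l)
	{
		(void)atomic_exchange(l, 0);	/* store 0 with a full barrier, like xchg */
	}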
*/ + (void)InterlockedExchange(lock, 0); +} + +void ub_thread_key_create(ub_thread_key_type* key, void* f) +{ + *key = TlsAlloc(); + if(*key == TLS_OUT_OF_INDEXES) { + *key = 0; + log_win_err("TlsAlloc Failed(OUT_OF_INDEXES)", GetLastError()); + } + else ub_thread_key_set(*key, f); +} + +void ub_thread_key_set(ub_thread_key_type key, void* v) +{ + if(!TlsSetValue(key, v)) { + log_win_err("TlsSetValue failed", GetLastError()); + } +} + +void* ub_thread_key_get(ub_thread_key_type key) +{ + void* ret = (void*)TlsGetValue(key); + if(ret == NULL && GetLastError() != ERROR_SUCCESS) { + log_win_err("TlsGetValue failed", GetLastError()); + } + return ret; +} + +void ub_thread_create(ub_thread_type* thr, void* (*func)(void*), void* arg) +{ +#ifndef HAVE__BEGINTHREADEX + *thr = CreateThread(NULL, /* default security (no inherit handle) */ + 0, /* default stack size */ + (LPTHREAD_START_ROUTINE)func, arg, + 0, /* default flags, run immediately */ + NULL); /* do not store thread identifier anywhere */ +#else + /* the beginthreadex routine setups for the C lib; aligns stack */ + *thr=(ub_thread_type)_beginthreadex(NULL, 0, (void*)func, arg, 0, NULL); +#endif + if(*thr == NULL) { + log_win_err("CreateThread failed", GetLastError()); + fatal_exit("thread create failed"); + } +} + +ub_thread_type ub_thread_self(void) +{ + return GetCurrentThread(); +} + +void ub_thread_join(ub_thread_type thr) +{ + DWORD ret = WaitForSingleObject(thr, INFINITE); + if(ret == WAIT_FAILED) { + log_win_err("WaitForSingleObject(Thread):WAIT_FAILED", + GetLastError()); + } else if(ret == WAIT_TIMEOUT) { + log_win_err("WaitForSingleObject(Thread):WAIT_TIMEOUT", + GetLastError()); + } + /* and close the handle to the thread */ + if(!CloseHandle(thr)) { + log_win_err("CloseHandle(Thread) failed", GetLastError()); + } +} +#endif /* HAVE_WINDOWS_THREADS */ diff --git a/src/util/locks.h b/src/util/locks.h new file mode 100644 index 00000000..fea65e0a --- /dev/null +++ b/src/util/locks.h @@ -0,0 +1,64 @@ +/** + * + * \file locks.h + * /brief Alternative symbol names for unbound's locks.h + * + */ +/* + * Copyright (c) 2017, NLnet Labs, the getdns team + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the names of the copyright holders nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Verisign, Inc. 
BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef LOCKS_H_SYMBOLS +#define LOCKS_H_SYMBOLS + +#include "config.h" + +#define ub_thread_blocksigs _getdns_ub_thread_blocksigs +#define ub_thread_sig_unblock _getdns_ub_thread_sig_unblock + +#define ub_thread_type _getdns_ub_thread_type +#define ub_thr_fork_create _getdns_ub_thr_fork_create +#define ub_thr_fork_wait _getdns_ub_thr_fork_wait + +#if defined(HAVE_SOLARIS_THREADS) || defined(HAVE_WINDOWS_THREADS) +#define ub_thread_key_type _getdns_ub_thread_key_type +#define ub_thread_key_create _getdns_ub_thread_key_create +#define ub_thread_key_set _getdns_ub_thread_key_set +#define ub_thread_key_get _getdns_ub_thread_key_get +#endif + +#ifdef HAVE_WINDOWS_THREADS +#define lock_basic_type _getdns_lock_basic_type +#define lock_basic_init _getdns_lock_basic_init +#define lock_basic_destroy _getdns_lock_basic_destroy +#define lock_basic_lock _getdns_lock_basic_lock_ +#define lock_basic_unlock _getdns_lock_basic_unlock + +#define ub_thread_create _getdns_ub_thread_create +#define ub_thread_self _getdns_ub_thread_self +#endif + +#include "util/orig-headers/locks.h" +#endif diff --git a/src/util/lookup3.c b/src/util/lookup3.c new file mode 100644 index 00000000..e9b05af3 --- /dev/null +++ b/src/util/lookup3.c @@ -0,0 +1,1032 @@ +/* + February 2013(Wouter) patch defines for BSD endianness, from Brad Smith. + January 2012(Wouter) added randomised initial value, fallout from 28c3. + March 2007(Wouter) adapted from lookup3.c original, add config.h include. + added #ifdef VALGRIND to remove 298,384,660 'unused variable k8' warnings. + added include of lookup3.h to check definitions match declarations. + removed include of stdint - config.h takes care of platform independence. + url http://burtleburtle.net/bob/hash/index.html. +*/ +/* +------------------------------------------------------------------------------- +lookup3.c, by Bob Jenkins, May 2006, Public Domain. + +These are functions for producing 32-bit hashes for hash table lookup. +hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() +are externally useful functions. Routines to test the hash are included +if SELF_TEST is defined. You can use this free for any purpose. It's in +the public domain. It has no warranty. + +You probably want to use hashlittle(). hashlittle() and hashbig() +hash byte arrays. hashlittle() is is faster than hashbig() on +little-endian machines. Intel and AMD are little-endian machines. +On second thought, you probably want hashlittle2(), which is identical to +hashlittle() except it returns two 32-bit hashes for the price of one. +You could implement hashbig2() if you wanted but I haven't bothered here. + +If you want to find a hash of, say, exactly 7 integers, do + a = i1; b = i2; c = i3; + mix(a,b,c); + a += i4; b += i5; c += i6; + mix(a,b,c); + a += i7; + final(a,b,c); +then use c as the hash value. If you have a variable length array of +4-byte integers to hash, use hashword(). If you have a byte array (like +a character string), use hashlittle(). 
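A caller sketch of that advice (hypothetical helper; the hashlittle() prototype is assumed to come from util/storage/lookup3.h): hashing a NUL-terminated string, optionally chaining a previous hash as the seed.

	#include <stdint.h>
	#include <string.h>

	static uint32_t hash_name(const char* name, uint32_t seed)
	{
		/* length is in bytes; any 32-bit seed works, reuse a prior hash to chain */
		return hashlittle(name, strlen(name), seed);
	}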
If you have several byte arrays, or +a mix of things, see the comments above hashlittle(). + +Why is this so big? I read 12 bytes at a time into 3 4-byte integers, +then mix those integers. This is fast (you can do a lot more thorough +mixing with 12*3 instructions on 3 integers than you can with 3 instructions +on 1 byte), but shoehorning those bytes into integers efficiently is messy. +------------------------------------------------------------------------------- +*/ +/*#define SELF_TEST 1*/ + +#include "config.h" +#include "util/storage/lookup3.h" +#include /* defines printf for tests */ +#include /* defines time_t for timings in the test */ +/*#include defines uint32_t etc (from config.h) */ +#include /* attempt to define endianness */ +#ifdef HAVE_SYS_TYPES_H +# include /* attempt to define endianness (solaris) */ +#endif +#if defined(linux) || defined(__OpenBSD__) +# ifdef HAVE_ENDIAN_H +# include /* attempt to define endianness */ +# else +# include /* on older OpenBSD */ +# endif +#endif +#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) +#include /* attempt to define endianness */ +#endif + +/* random initial value */ +static uint32_t raninit = (uint32_t)0xdeadbeef; + +void +hash_set_raninit(uint32_t v) +{ + raninit = v; +} + +/* + * My best guess at if you are big-endian or little-endian. This may + * need adjustment. + */ +#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \ + __BYTE_ORDER == __LITTLE_ENDIAN) || \ + (defined(i386) || defined(__i386__) || defined(__i486__) || \ + defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL) || defined(__x86)) +# define HASH_LITTLE_ENDIAN 1 +# define HASH_BIG_ENDIAN 0 +#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \ + __BYTE_ORDER == __BIG_ENDIAN) || \ + (defined(sparc) || defined(__sparc) || defined(__sparc__) || defined(POWERPC) || defined(mc68000) || defined(sel)) +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 1 +#elif defined(_MACHINE_ENDIAN_H_) +/* test for machine_endian_h protects failure if some are empty strings */ +# if defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && _BYTE_ORDER == _BIG_ENDIAN +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 1 +# endif +# if defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && _BYTE_ORDER == _LITTLE_ENDIAN +# define HASH_LITTLE_ENDIAN 1 +# define HASH_BIG_ENDIAN 0 +# endif /* _MACHINE_ENDIAN_H_ */ +#else +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 0 +#endif + +#define hashsize(n) ((uint32_t)1<<(n)) +#define hashmask(n) (hashsize(n)-1) +#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) + +/* +------------------------------------------------------------------------------- +mix -- mix 3 32-bit values reversibly. + +This is reversible, so any information in (a,b,c) before mix() is +still in (a,b,c) after mix(). + +If four pairs of (a,b,c) inputs are run through mix(), or through +mix() in reverse, there are at least 32 bits of the output that +are sometimes the same for one pair and different for another pair. +This was tested for: +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. 
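The randomised initial value mentioned in the 2012 note at the top of this file is set through hash_set_raninit(); a caller would typically seed it once at startup from an entropy source (sketch, hypothetical function name):

	static void seed_hashes(uint32_t entropy)
	{
		hash_set_raninit(entropy);	/* perturbs every subsequent hash value */
	}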
+ +Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that +satisfy this are + 4 6 8 16 19 4 + 9 15 3 18 27 15 + 14 9 3 7 17 3 +Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing +for "differ" defined as + with a one-bit base and a two-bit delta. I +used http://burtleburtle.net/bob/hash/avalanche.html to choose +the operations, constants, and arrangements of the variables. + +This does not achieve avalanche. There are input bits of (a,b,c) +that fail to affect some output bits of (a,b,c), especially of a. The +most thoroughly mixed value is c, but it doesn't really even achieve +avalanche in c. + +This allows some parallelism. Read-after-writes are good at doubling +the number of bits affected, so the goal of mixing pulls in the opposite +direction as the goal of parallelism. I did what I could. Rotates +seem to cost as much as shifts on every machine I could lay my hands +on, and rotates are much kinder to the top and bottom bits, so I used +rotates. +------------------------------------------------------------------------------- +*/ +#define mix(a,b,c) \ +{ \ + a -= c; a ^= rot(c, 4); c += b; \ + b -= a; b ^= rot(a, 6); a += c; \ + c -= b; c ^= rot(b, 8); b += a; \ + a -= c; a ^= rot(c,16); c += b; \ + b -= a; b ^= rot(a,19); a += c; \ + c -= b; c ^= rot(b, 4); b += a; \ +} + +/* +------------------------------------------------------------------------------- +final -- final mixing of 3 32-bit values (a,b,c) into c + +Pairs of (a,b,c) values differing in only a few bits will usually +produce values of c that look totally different. This was tested for +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. + +These constants passed: + 14 11 25 16 4 14 24 + 12 14 25 16 4 14 24 +and these came close: + 4 8 15 26 3 22 24 + 10 8 15 26 3 22 24 + 11 8 15 26 3 22 24 +------------------------------------------------------------------------------- +*/ +#define final(a,b,c) \ +{ \ + c ^= b; c -= rot(b,14); \ + a ^= c; a -= rot(c,11); \ + b ^= a; b -= rot(a,25); \ + c ^= b; c -= rot(b,16); \ + a ^= c; a -= rot(c,4); \ + b ^= a; b -= rot(a,14); \ + c ^= b; c -= rot(b,24); \ +} + +/* +-------------------------------------------------------------------- + This works on all machines. To be useful, it requires + -- that the key be an array of uint32_t's, and + -- that the length be the number of uint32_t's in the key + + The function hashword() is identical to hashlittle() on little-endian + machines, and identical to hashbig() on big-endian machines, + except that the length has to be measured in uint32_ts rather than in + bytes. hashlittle() is more complicated than hashword() only because + hashlittle() has to dance around fitting the key bytes into registers. 
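A caller sketch for hashword() as described above (hypothetical key layout): the length argument counts uint32_t words, not bytes.

	#include <stdint.h>

	static uint32_t hash_triple(uint32_t addr, uint32_t port, uint32_t proto)
	{
		uint32_t key[3] = { addr, port, proto };
		return hashword(key, 3 /* uint32_t's, not bytes */, 0xabcd1234);
	}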
+-------------------------------------------------------------------- +*/ +uint32_t hashword( +const uint32_t *k, /* the key, an array of uint32_t values */ +size_t length, /* the length of the key, in uint32_ts */ +uint32_t initval) /* the previous hash, or an arbitrary value */ +{ + uint32_t a,b,c; + + /* Set up the internal state */ + a = b = c = raninit + (((uint32_t)length)<<2) + initval; + + /*------------------------------------------------- handle most of the key */ + while (length > 3) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 3; + k += 3; + } + + /*------------------------------------------- handle the last 3 uint32_t's */ + switch(length) /* all the case statements fall through */ + { + case 3 : c+=k[2]; + case 2 : b+=k[1]; + case 1 : a+=k[0]; + final(a,b,c); + case 0: /* case 0: nothing left to add */ + break; + } + /*------------------------------------------------------ report the result */ + return c; +} + + +#ifdef SELF_TEST + +/* +-------------------------------------------------------------------- +hashword2() -- same as hashword(), but take two seeds and return two +32-bit values. pc and pb must both be nonnull, and *pc and *pb must +both be initialized with seeds. If you pass in (*pb)==0, the output +(*pc) will be the same as the return value from hashword(). +-------------------------------------------------------------------- +*/ +void hashword2 ( +const uint32_t *k, /* the key, an array of uint32_t values */ +size_t length, /* the length of the key, in uint32_ts */ +uint32_t *pc, /* IN: seed OUT: primary hash value */ +uint32_t *pb) /* IN: more seed OUT: secondary hash value */ +{ + uint32_t a,b,c; + + /* Set up the internal state */ + a = b = c = raninit + ((uint32_t)(length<<2)) + *pc; + c += *pb; + + /*------------------------------------------------- handle most of the key */ + while (length > 3) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 3; + k += 3; + } + + /*------------------------------------------- handle the last 3 uint32_t's */ + switch(length) /* all the case statements fall through */ + { + case 3 : c+=k[2]; + case 2 : b+=k[1]; + case 1 : a+=k[0]; + final(a,b,c); + case 0: /* case 0: nothing left to add */ + break; + } + /*------------------------------------------------------ report the result */ + *pc=c; *pb=b; +} + +#endif /* SELF_TEST */ + +/* +------------------------------------------------------------------------------- +hashlittle() -- hash a variable-length key into a 32-bit value + k : the key (the unaligned variable-length array of bytes) + length : the length of the key, counting by bytes + initval : can be any 4-byte value +Returns a 32-bit value. Every bit of the key affects every bit of +the return value. Two keys differing by one or two bits will have +totally different hash values. + +The best hash table sizes are powers of 2. There is no need to do +mod a prime (mod is sooo slow!). If you need less than 32 bits, +use a bitmask. For example, if you need only 10 bits, do + h = (h & hashmask(10)); +In which case, the hash table should have hashsize(10) elements. + +If you are hashing n strings (uint8_t **)k, do it like this: + for (i=0, h=0; i 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]&0xffffff" actually reads beyond the end of the string, but + * then masks off the part it's not allowed to read. 
Because the + * string is aligned, the masked-off tail is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticeably faster for short strings (like English words). + */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff; a+=k[0]; break; + case 6 : b+=k[1]&0xffff; a+=k[0]; break; + case 5 : b+=k[1]&0xff; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff; break; + case 2 : a+=k[0]&0xffff; break; + case 1 : a+=k[0]&0xff; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 1 : a+=k8[0]; break; + case 0 : return c; + } + +#endif /* !valgrind */ + + } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { + const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ + const uint8_t *k8; + + /*--------------- all but last block: aligned reads and different mixing */ + while (length > 12) + { + a += k[0] + (((uint32_t)k[1])<<16); + b += k[2] + (((uint32_t)k[3])<<16); + c += k[4] + (((uint32_t)k[5])<<16); + mix(a,b,c); + length -= 12; + k += 6; + } + + /*----------------------------- handle the last (probably partial) block */ + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[4]+(((uint32_t)k[5])<<16); + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=k[4]; + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=k[2]; + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=k[0]; + break; + case 1 : a+=k8[0]; + break; + case 0 : return c; /* zero length requires no mixing */ + } + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + a += ((uint32_t)k[1])<<8; + a += ((uint32_t)k[2])<<16; + a += ((uint32_t)k[3])<<24; + b += k[4]; + b += ((uint32_t)k[5])<<8; + b += ((uint32_t)k[6])<<16; + b += ((uint32_t)k[7])<<24; + c += k[8]; + c += ((uint32_t)k[9])<<8; + c += ((uint32_t)k[10])<<16; + c += ((uint32_t)k[11])<<24; + mix(a,b,c); + length -= 12; + k += 12; + } + + 
/*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=((uint32_t)k[11])<<24; + case 11: c+=((uint32_t)k[10])<<16; + case 10: c+=((uint32_t)k[9])<<8; + case 9 : c+=k[8]; + case 8 : b+=((uint32_t)k[7])<<24; + case 7 : b+=((uint32_t)k[6])<<16; + case 6 : b+=((uint32_t)k[5])<<8; + case 5 : b+=k[4]; + case 4 : a+=((uint32_t)k[3])<<24; + case 3 : a+=((uint32_t)k[2])<<16; + case 2 : a+=((uint32_t)k[1])<<8; + case 1 : a+=k[0]; + break; + case 0 : return c; + } + } + + final(a,b,c); + return c; +} + +#ifdef SELF_TEST + +/* + * hashlittle2: return 2 32-bit hash values + * + * This is identical to hashlittle(), except it returns two 32-bit hash + * values instead of just one. This is good enough for hash table + * lookup with 2^^64 buckets, or if you want a second hash if you're not + * happy with the first, or if you want a probably-unique 64-bit ID for + * the key. *pc is better mixed than *pb, so use *pc first. If you want + * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)". + */ +void hashlittle2( + const void *key, /* the key to hash */ + size_t length, /* length of the key */ + uint32_t *pc, /* IN: primary initval, OUT: primary hash */ + uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */ +{ + uint32_t a,b,c; /* internal state */ + union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */ + + /* Set up the internal state */ + a = b = c = raninit + ((uint32_t)length) + *pc; + c += *pb; + + u.ptr = key; + if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ +#ifdef VALGRIND + const uint8_t *k8; +#endif + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]&0xffffff" actually reads beyond the end of the string, but + * then masks off the part it's not allowed to read. Because the + * string is aligned, the masked-off tail is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticeably faster for short strings (like English words). 
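The hashlittle2() comment above suggests how to build a 64-bit value from the two 32-bit results; a sketch of that combination follows (note that in this imported copy hashlittle2() is only compiled when SELF_TEST is defined):

	#include <stddef.h>
	#include <stdint.h>

	static uint64_t hash64(const void* key, size_t len)
	{
		uint32_t pc = 0, pb = 0;	/* seeds in, hash halves out; *pc is better mixed */
		hashlittle2(key, len, &pc, &pb);
		return (uint64_t)pc + (((uint64_t)pb) << 32);
	}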
+ */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff; a+=k[0]; break; + case 6 : b+=k[1]&0xffff; a+=k[0]; break; + case 5 : b+=k[1]&0xff; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff; break; + case 2 : a+=k[0]&0xffff; break; + case 1 : a+=k[0]&0xff; break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 1 : a+=k8[0]; break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + +#endif /* !valgrind */ + + } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { + const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ + const uint8_t *k8; + + /*--------------- all but last block: aligned reads and different mixing */ + while (length > 12) + { + a += k[0] + (((uint32_t)k[1])<<16); + b += k[2] + (((uint32_t)k[3])<<16); + c += k[4] + (((uint32_t)k[5])<<16); + mix(a,b,c); + length -= 12; + k += 6; + } + + /*----------------------------- handle the last (probably partial) block */ + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[4]+(((uint32_t)k[5])<<16); + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=k[4]; + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=k[2]; + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=k[0]; + break; + case 1 : a+=k8[0]; + break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + a += ((uint32_t)k[1])<<8; + a += ((uint32_t)k[2])<<16; + a += ((uint32_t)k[3])<<24; + b += k[4]; + b += ((uint32_t)k[5])<<8; + b += ((uint32_t)k[6])<<16; + b += ((uint32_t)k[7])<<24; + c += k[8]; + c += ((uint32_t)k[9])<<8; + c += ((uint32_t)k[10])<<16; + c += ((uint32_t)k[11])<<24; + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=((uint32_t)k[11])<<24; + case 11: c+=((uint32_t)k[10])<<16; + case 10: c+=((uint32_t)k[9])<<8; + case 9 : c+=k[8]; + case 8 : 
b+=((uint32_t)k[7])<<24; + case 7 : b+=((uint32_t)k[6])<<16; + case 6 : b+=((uint32_t)k[5])<<8; + case 5 : b+=k[4]; + case 4 : a+=((uint32_t)k[3])<<24; + case 3 : a+=((uint32_t)k[2])<<16; + case 2 : a+=((uint32_t)k[1])<<8; + case 1 : a+=k[0]; + break; + case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ + } + } + + final(a,b,c); + *pc=c; *pb=b; +} + +#endif /* SELF_TEST */ + +#if 0 /* currently not used */ + +/* + * hashbig(): + * This is the same as hashword() on big-endian machines. It is different + * from hashlittle() on all machines. hashbig() takes advantage of + * big-endian byte ordering. + */ +uint32_t hashbig( const void *key, size_t length, uint32_t initval) +{ + uint32_t a,b,c; + union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */ + + /* Set up the internal state */ + a = b = c = raninit + ((uint32_t)length) + initval; + + u.ptr = key; + if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) { + const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ +#ifdef VALGRIND + const uint8_t *k8; +#endif + + /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]<<8" actually reads beyond the end of the string, but + * then shifts out the part it's not allowed to read. Because the + * string is aligned, the illegal read is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticeably faster for short strings (like English words). 
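A sketch of the power-of-two table sizing recommended in the hashlittle() comment, using the hashsize()/hashmask() macros defined earlier in this file (so it relies on this file's own includes and only makes sense inside this translation unit):

	static size_t bucket_index(uint32_t h)
	{
		/* hypothetical table of hashsize(10) == 1024 buckets */
		return (size_t)(h & hashmask(10));
	}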
+ */ +#ifndef VALGRIND + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff00; a+=k[0]; break; + case 6 : b+=k[1]&0xffff0000; a+=k[0]; break; + case 5 : b+=k[1]&0xff000000; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff00; break; + case 2 : a+=k[0]&0xffff0000; break; + case 1 : a+=k[0]&0xff000000; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) /* all the case statements fall through */ + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<8; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<16; /* fall through */ + case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */ + case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */ + case 1 : a+=((uint32_t)k8[0])<<24; break; + case 0 : return c; + } + +#endif /* !VALGRIND */ + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += ((uint32_t)k[0])<<24; + a += ((uint32_t)k[1])<<16; + a += ((uint32_t)k[2])<<8; + a += ((uint32_t)k[3]); + b += ((uint32_t)k[4])<<24; + b += ((uint32_t)k[5])<<16; + b += ((uint32_t)k[6])<<8; + b += ((uint32_t)k[7]); + c += ((uint32_t)k[8])<<24; + c += ((uint32_t)k[9])<<16; + c += ((uint32_t)k[10])<<8; + c += ((uint32_t)k[11]); + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=k[11]; + case 11: c+=((uint32_t)k[10])<<8; + case 10: c+=((uint32_t)k[9])<<16; + case 9 : c+=((uint32_t)k[8])<<24; + case 8 : b+=k[7]; + case 7 : b+=((uint32_t)k[6])<<8; + case 6 : b+=((uint32_t)k[5])<<16; + case 5 : b+=((uint32_t)k[4])<<24; + case 4 : a+=k[3]; + case 3 : a+=((uint32_t)k[2])<<8; + case 2 : a+=((uint32_t)k[1])<<16; + case 1 : a+=((uint32_t)k[0])<<24; + break; + case 0 : return c; + } + } + + final(a,b,c); + return c; +} + +#endif /* 0 == currently not used */ + +#ifdef SELF_TEST + +/* used for timings */ +void driver1(void) +{ + uint8_t buf[256]; + uint32_t i; + uint32_t h=0; + time_t a,z; + + time(&a); + for (i=0; i<256; ++i) buf[i] = 'x'; + for (i=0; i<1; ++i) + { + h = hashlittle(&buf[0],1,h); + } + time(&z); + if (z-a > 0) printf("time %d %.8x\n", z-a, h); +} + +/* check that every input bit changes every output bit half the time */ +#define HASHSTATE 1 +#define HASHLEN 1 +#define MAXPAIR 60 +#define MAXLEN 70 +void driver2(void) +{ + uint8_t qa[MAXLEN+1], qb[MAXLEN+2], *a = &qa[0], *b = &qb[1]; + uint32_t c[HASHSTATE], d[HASHSTATE], i=0, j=0, k, l, m=0, z; + uint32_t e[HASHSTATE],f[HASHSTATE],g[HASHSTATE],h[HASHSTATE]; + uint32_t x[HASHSTATE],y[HASHSTATE]; + uint32_t hlen; + + printf("No more than %d trials should ever be needed \n",MAXPAIR/2); + for (hlen=0; hlen < MAXLEN; ++hlen) + { + z=0; + for (i=0; i>(8-j)); + c[0] = hashlittle(a, hlen, 
m); + b[i] ^= ((k+1)<>(8-j)); + d[0] = hashlittle(b, hlen, m); + /* check every bit is 1, 0, set, and not set at least once */ + for (l=0; lz) z=k; + if (k==MAXPAIR) + { + printf("Some bit didn't change: "); + printf("%.8x %.8x %.8x %.8x %.8x %.8x ", + e[0],f[0],g[0],h[0],x[0],y[0]); + printf("i %d j %d m %d len %d\n", i, j, m, hlen); + } + if (z==MAXPAIR) goto done; + } + } + } + done: + if (z < MAXPAIR) + { + printf("Mix success %2d bytes %2d initvals ",i,m); + printf("required %d trials\n", z/2); + } + } + printf("\n"); +} + +/* Check for reading beyond the end of the buffer and alignment problems */ +void driver3(void) +{ + uint8_t buf[MAXLEN+20], *b; + uint32_t len; + uint8_t q[] = "This is the time for all good men to come to the aid of their country..."; + uint32_t h; + uint8_t qq[] = "xThis is the time for all good men to come to the aid of their country..."; + uint32_t i; + uint8_t qqq[] = "xxThis is the time for all good men to come to the aid of their country..."; + uint32_t j; + uint8_t qqqq[] = "xxxThis is the time for all good men to come to the aid of their country..."; + uint32_t ref,x,y; + uint8_t *p; + + printf("Endianness. These lines should all be the same (for values filled in):\n"); + printf("%.8x %.8x %.8x\n", + hashword((const uint32_t *)q, (sizeof(q)-1)/4, 13), + hashword((const uint32_t *)q, (sizeof(q)-5)/4, 13), + hashword((const uint32_t *)q, (sizeof(q)-9)/4, 13)); + p = q; + printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", + hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), + hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), + hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), + hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), + hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), + hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); + p = &qq[1]; + printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", + hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), + hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), + hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), + hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), + hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), + hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); + p = &qqq[2]; + printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", + hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), + hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), + hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), + hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), + hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), + hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); + p = &qqqq[3]; + printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", + hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), + hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), + hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), + hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), + hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), + hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); + printf("\n"); + + /* check that hashlittle2 and hashlittle produce the same results */ + i=47; j=0; + hashlittle2(q, sizeof(q), &i, &j); + if (hashlittle(q, sizeof(q), 47) != i) + 
printf("hashlittle2 and hashlittle mismatch\n"); + + /* check that hashword2 and hashword produce the same results */ + len = raninit; + i=47, j=0; + hashword2(&len, 1, &i, &j); + if (hashword(&len, 1, 47) != i) + printf("hashword2 and hashword mismatch %x %x\n", + i, hashword(&len, 1, 47)); + + /* check hashlittle doesn't read before or after the ends of the string */ + for (h=0, b=buf+1; h<8; ++h, ++b) + { + for (i=0; ilock); + table->sizefunc = sizefunc; + table->compfunc = compfunc; + table->delkeyfunc = delkeyfunc; + table->deldatafunc = deldatafunc; + table->cb_arg = arg; + table->size = start_size; + table->size_mask = (int)(start_size-1); + table->lru_start = NULL; + table->lru_end = NULL; + table->num = 0; + table->space_used = 0; + table->space_max = maxmem; + table->array = calloc(table->size, sizeof(struct lruhash_bin)); + if(!table->array) { + lock_quick_destroy(&table->lock); + free(table); + return NULL; + } + bin_init(table->array, table->size); + lock_protect(&table->lock, table, sizeof(*table)); + lock_protect(&table->lock, table->array, + table->size*sizeof(struct lruhash_bin)); + return table; +} + +void +bin_delete(struct lruhash* table, struct lruhash_bin* bin) +{ + struct lruhash_entry* p, *np; + void *d; + if(!bin) + return; + lock_quick_destroy(&bin->lock); + p = bin->overflow_list; + bin->overflow_list = NULL; + while(p) { + np = p->overflow_next; + d = p->data; + (*table->delkeyfunc)(p->key, table->cb_arg); + (*table->deldatafunc)(d, table->cb_arg); + p = np; + } +} + +void +bin_split(struct lruhash* table, struct lruhash_bin* newa, + int newmask) +{ + size_t i; + struct lruhash_entry *p, *np; + struct lruhash_bin* newbin; + /* move entries to new table. Notice that since hash x is mapped to + * bin x & mask, and new mask uses one more bit, so all entries in + * one bin will go into the old bin or bin | newbit */ +#ifndef THREADS_DISABLED + int newbit = newmask - table->size_mask; +#endif + /* so, really, this task could also be threaded, per bin. */ + /* LRU list is not changed */ + for(i=0; isize; i++) + { + lock_quick_lock(&table->array[i].lock); + p = table->array[i].overflow_list; + /* lock both destination bins */ + lock_quick_lock(&newa[i].lock); + lock_quick_lock(&newa[newbit|i].lock); + while(p) { + np = p->overflow_next; + /* link into correct new bin */ + newbin = &newa[p->hash & newmask]; + p->overflow_next = newbin->overflow_list; + newbin->overflow_list = p; + p=np; + } + lock_quick_unlock(&newa[i].lock); + lock_quick_unlock(&newa[newbit|i].lock); + lock_quick_unlock(&table->array[i].lock); + } +} + +void +lruhash_delete(struct lruhash* table) +{ + size_t i; + if(!table) + return; + /* delete lock on hashtable to force check its OK */ + lock_quick_destroy(&table->lock); + for(i=0; isize; i++) + bin_delete(table, &table->array[i]); + free(table->array); + free(table); +} + +void +bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry) +{ + struct lruhash_entry* p = bin->overflow_list; + struct lruhash_entry** prevp = &bin->overflow_list; + while(p) { + if(p == entry) { + *prevp = p->overflow_next; + return; + } + prevp = &p->overflow_next; + p = p->overflow_next; + } +} + +void +reclaim_space(struct lruhash* table, struct lruhash_entry** list) +{ + struct lruhash_entry* d; + struct lruhash_bin* bin; + log_assert(table); + /* does not delete MRU entry, so table will not be empty. */ + while(table->num > 1 && table->space_used > table->space_max) { + /* notice that since we hold the hashtable lock, nobody + can change the lru chain. 
So it cannot be deleted underneath + us. We still need the hashbin and entry write lock to make + sure we flush all users away from the entry. + which is unlikely, since it is LRU, if someone got a rdlock + it would be moved to front, but to be sure. */ + d = table->lru_end; + /* specialised, delete from end of double linked list, + and we know num>1, so there is a previous lru entry. */ + log_assert(d && d->lru_prev); + table->lru_end = d->lru_prev; + d->lru_prev->lru_next = NULL; + /* schedule entry for deletion */ + bin = &table->array[d->hash & table->size_mask]; + table->num --; + lock_quick_lock(&bin->lock); + bin_overflow_remove(bin, d); + d->overflow_next = *list; + *list = d; + lock_rw_wrlock(&d->lock); + table->space_used -= table->sizefunc(d->key, d->data); + if(table->markdelfunc) + (*table->markdelfunc)(d->key); + lock_rw_unlock(&d->lock); + lock_quick_unlock(&bin->lock); + } +} + +struct lruhash_entry* +bin_find_entry(struct lruhash* table, + struct lruhash_bin* bin, hashvalue_type hash, void* key) +{ + struct lruhash_entry* p = bin->overflow_list; + while(p) { + if(p->hash == hash && table->compfunc(p->key, key) == 0) + return p; + p = p->overflow_next; + } + return NULL; +} + +void +table_grow(struct lruhash* table) +{ + struct lruhash_bin* newa; + int newmask; + size_t i; + if(table->size_mask == (int)(((size_t)-1)>>1)) { + log_err("hash array malloc: size_t too small"); + return; + } + /* try to allocate new array, if not fail */ + newa = calloc(table->size*2, sizeof(struct lruhash_bin)); + if(!newa) { + log_err("hash grow: malloc failed"); + /* continue with smaller array. Though its slower. */ + return; + } + bin_init(newa, table->size*2); + newmask = (table->size_mask << 1) | 1; + bin_split(table, newa, newmask); + /* delete the old bins */ + lock_unprotect(&table->lock, table->array); + for(i=0; isize; i++) { + lock_quick_destroy(&table->array[i].lock); + } + free(table->array); + + table->size *= 2; + table->size_mask = newmask; + table->array = newa; + lock_protect(&table->lock, table->array, + table->size*sizeof(struct lruhash_bin)); + return; +} + +void +lru_front(struct lruhash* table, struct lruhash_entry* entry) +{ + entry->lru_prev = NULL; + entry->lru_next = table->lru_start; + if(!table->lru_start) + table->lru_end = entry; + else table->lru_start->lru_prev = entry; + table->lru_start = entry; +} + +void +lru_remove(struct lruhash* table, struct lruhash_entry* entry) +{ + if(entry->lru_prev) + entry->lru_prev->lru_next = entry->lru_next; + else table->lru_start = entry->lru_next; + if(entry->lru_next) + entry->lru_next->lru_prev = entry->lru_prev; + else table->lru_end = entry->lru_prev; +} + +void +lru_touch(struct lruhash* table, struct lruhash_entry* entry) +{ + log_assert(table && entry); + if(entry == table->lru_start) + return; /* nothing to do */ + /* remove from current lru position */ + lru_remove(table, entry); + /* add at front */ + lru_front(table, entry); +} + +void +lruhash_insert(struct lruhash* table, hashvalue_type hash, + struct lruhash_entry* entry, void* data, void* cb_arg) +{ + struct lruhash_bin* bin; + struct lruhash_entry* found, *reclaimlist=NULL; + size_t need_size; + fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc)); + fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc)); + fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc)); + fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc)); + fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc)); + need_size = table->sizefunc(entry->key, data); + if(cb_arg == 
NULL) cb_arg = table->cb_arg; + + /* find bin */ + lock_quick_lock(&table->lock); + bin = &table->array[hash & table->size_mask]; + lock_quick_lock(&bin->lock); + + /* see if entry exists already */ + if(!(found=bin_find_entry(table, bin, hash, entry->key))) { + /* if not: add to bin */ + entry->overflow_next = bin->overflow_list; + bin->overflow_list = entry; + lru_front(table, entry); + table->num++; + table->space_used += need_size; + } else { + /* if so: update data - needs a writelock */ + table->space_used += need_size - + (*table->sizefunc)(found->key, found->data); + (*table->delkeyfunc)(entry->key, cb_arg); + lru_touch(table, found); + lock_rw_wrlock(&found->lock); + (*table->deldatafunc)(found->data, cb_arg); + found->data = data; + lock_rw_unlock(&found->lock); + } + lock_quick_unlock(&bin->lock); + if(table->space_used > table->space_max) + reclaim_space(table, &reclaimlist); + if(table->num >= table->size) + table_grow(table); + lock_quick_unlock(&table->lock); + + /* finish reclaim if any (outside of critical region) */ + while(reclaimlist) { + struct lruhash_entry* n = reclaimlist->overflow_next; + void* d = reclaimlist->data; + (*table->delkeyfunc)(reclaimlist->key, cb_arg); + (*table->deldatafunc)(d, cb_arg); + reclaimlist = n; + } +} + +struct lruhash_entry* +lruhash_lookup(struct lruhash* table, hashvalue_type hash, void* key, int wr) +{ + struct lruhash_entry* entry; + struct lruhash_bin* bin; + fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc)); + + lock_quick_lock(&table->lock); + bin = &table->array[hash & table->size_mask]; + lock_quick_lock(&bin->lock); + if((entry=bin_find_entry(table, bin, hash, key))) + lru_touch(table, entry); + lock_quick_unlock(&table->lock); + + if(entry) { + if(wr) { lock_rw_wrlock(&entry->lock); } + else { lock_rw_rdlock(&entry->lock); } + } + lock_quick_unlock(&bin->lock); + return entry; +} + +void +lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key) +{ + struct lruhash_entry* entry; + struct lruhash_bin* bin; + void *d; + fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc)); + fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc)); + fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc)); + fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc)); + fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc)); + + lock_quick_lock(&table->lock); + bin = &table->array[hash & table->size_mask]; + lock_quick_lock(&bin->lock); + if((entry=bin_find_entry(table, bin, hash, key))) { + bin_overflow_remove(bin, entry); + lru_remove(table, entry); + } else { + lock_quick_unlock(&table->lock); + lock_quick_unlock(&bin->lock); + return; + } + table->num--; + table->space_used -= (*table->sizefunc)(entry->key, entry->data); + lock_quick_unlock(&table->lock); + lock_rw_wrlock(&entry->lock); + if(table->markdelfunc) + (*table->markdelfunc)(entry->key); + lock_rw_unlock(&entry->lock); + lock_quick_unlock(&bin->lock); + /* finish removal */ + d = entry->data; + (*table->delkeyfunc)(entry->key, table->cb_arg); + (*table->deldatafunc)(d, table->cb_arg); +} + +/** clear bin, respecting locks, does not do space, LRU */ +static void +bin_clear(struct lruhash* table, struct lruhash_bin* bin) +{ + struct lruhash_entry* p, *np; + void *d; + lock_quick_lock(&bin->lock); + p = bin->overflow_list; + while(p) { + lock_rw_wrlock(&p->lock); + np = p->overflow_next; + d = p->data; + if(table->markdelfunc) + (*table->markdelfunc)(p->key); + lock_rw_unlock(&p->lock); + (*table->delkeyfunc)(p->key, table->cb_arg); + (*table->deldatafunc)(d, 
table->cb_arg); + p = np; + } + bin->overflow_list = NULL; + lock_quick_unlock(&bin->lock); +} + +void +lruhash_clear(struct lruhash* table) +{ + size_t i; + if(!table) + return; + fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc)); + fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc)); + fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc)); + + lock_quick_lock(&table->lock); + for(i=0; i<table->size; i++) { + bin_clear(table, &table->array[i]); + } + table->lru_start = NULL; + table->lru_end = NULL; + table->num = 0; + table->space_used = 0; + lock_quick_unlock(&table->lock); +} + +void +lruhash_status(struct lruhash* table, const char* id, int extended) +{ + lock_quick_lock(&table->lock); + log_info("%s: %u entries, memory %u / %u", + id, (unsigned)table->num, (unsigned)table->space_used, + (unsigned)table->space_max); + log_info(" itemsize %u, array %u, mask %d", + (unsigned)(table->num? table->space_used/table->num : 0), + (unsigned)table->size, table->size_mask); + if(extended) { + size_t i; + int min=(int)table->size*2, max=-2; + for(i=0; i<table->size; i++) { + int here = 0; + struct lruhash_entry *en; + lock_quick_lock(&table->array[i].lock); + en = table->array[i].overflow_list; + while(en) { + here ++; + en = en->overflow_next; + } + lock_quick_unlock(&table->array[i].lock); + if(extended >= 2) + log_info("bin[%d] %d", (int)i, here); + if(here > max) max = here; + if(here < min) min = here; + } + log_info(" bin min %d, avg %.2lf, max %d", min, + (double)table->num/(double)table->size, max); + } + lock_quick_unlock(&table->lock); +} + +size_t +lruhash_get_mem(struct lruhash* table) +{ + size_t s; + lock_quick_lock(&table->lock); + s = sizeof(struct lruhash) + table->space_used; +#ifdef USE_THREAD_DEBUG + if(table->size != 0) { + size_t i; + for(i=0; i<table->size; i++) + s += sizeof(struct lruhash_bin) + + lock_get_mem(&table->array[i].lock); + } +#else /* no THREAD_DEBUG */ + if(table->size != 0) + s += (table->size)*(sizeof(struct lruhash_bin) + + lock_get_mem(&table->array[0].lock)); +#endif + lock_quick_unlock(&table->lock); + s += lock_get_mem(&table->lock); + return s; +} + +void +lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md) +{ + lock_quick_lock(&table->lock); + table->markdelfunc = md; + lock_quick_unlock(&table->lock); +} + +void +lruhash_traverse(struct lruhash* h, int wr, + void (*func)(struct lruhash_entry*, void*), void* arg) +{ + size_t i; + struct lruhash_entry* e; + + lock_quick_lock(&h->lock); + for(i=0; i<h->size; i++) { + lock_quick_lock(&h->array[i].lock); + for(e = h->array[i].overflow_list; e; e = e->overflow_next) { + if(wr) { + lock_rw_wrlock(&e->lock); + } else { + lock_rw_rdlock(&e->lock); + } + (*func)(e, arg); + lock_rw_unlock(&e->lock); + } + lock_quick_unlock(&h->array[i].lock); + } + lock_quick_unlock(&h->lock); +} diff --git a/src/util/lruhash.h b/src/util/lruhash.h new file mode 100644 index 00000000..17c8b551 --- /dev/null +++ b/src/util/lruhash.h @@ -0,0 +1,68 @@ +/** + * + * \file lruhash.h + * /brief Alternative symbol names for unbound's lruhash.h + * + */ +/* + * Copyright (c) 2017, NLnet Labs, the getdns team + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer.
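The lruhash.c code above drives everything through caller-supplied function pointers. As an illustration only, not part of the patch, here is a minimal callback set for a hypothetical table keyed by a numeric id with a malloc'ed string as data; the demo_* names are made up and the include paths assume the layout used in this tree.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "util/lruhash.h"   /* wrapper header path as vendored in this tree */

struct demo_key {
    struct lruhash_entry entry; /* the entry lives inside the key struct */
    uint32_t id;
};

/* size accounting must include the embedded lruhash_entry (via the key) */
static size_t demo_size(void* key, void* data)
{
    (void)key;
    return sizeof(struct demo_key) + strlen((char*)data) + 1;
}

/* return 0 for equal; only called for entries whose hash already matches */
static int demo_comp(void* a, void* b)
{
    uint32_t x = ((struct demo_key*)a)->id, y = ((struct demo_key*)b)->id;
    return x == y ? 0 : (x < y ? -1 : 1);
}

/* deleting the key also frees the embedded entry, as the header advises */
static void demo_delkey(void* key, void* arg)
{
    struct demo_key* k = (struct demo_key*)key;
    (void)arg;
    lock_rw_destroy(&k->entry.lock);
    free(k);
}

static void demo_deldata(void* data, void* arg)
{
    (void)arg;
    free(data);
}

static struct lruhash* demo_table_create(void)
{
    return lruhash_create(HASH_DEFAULT_STARTARRAY, HASH_DEFAULT_MAXMEM,
        demo_size, demo_comp, demo_delkey, demo_deldata, NULL);
}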
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the names of the copyright holders nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Verisign, Inc. BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef LRUHASH_H_SYMBOLS +#define LRUHASH_H_SYMBOLS + +#define lruhash _getdns_lruhash +#define lruhash_bin _getdns_lruhash_bin +#define lruhash_entry _getdns_lruhash_entry +#define hashvalue_type _getdns_hashvalue_type +#define lruhash_sizefunc_type _getdns_lruhash_sizefunc_type +#define lruhash_compfunc_type _getdns_lruhash_compfunc_type +#define lruhash_delkeyfunc_type _getdns_lruhash_delkeyfunc_type +#define lruhash_deldatafunc_type _getdns_lruhash_deldatafunc_type +#define lruhash_markdelfunc_type _getdns_lruhash_markdelfunc_type +#define lruhash_create _getdns_lruhash_create +#define lruhash_delete _getdns_lruhash_delete +#define lruhash_clear _getdns_lruhash_clear +#define lruhash_insert _getdns_lruhash_insert +#define lruhash_lookup _getdns_lruhash_lookup +#define lru_touch _getdns_lru_touch +#define lruhash_setmarkdel _getdns_lruhash_setmarkdel + +#define lruhash_remove _getdns_lruhash_remove +#define bin_init _getdns_bin_init +#define bin_delete _getdns_bin_delete +#define bin_find_entry _getdns_bin_find_entry +#define bin_overflow_remove _getdns_bin_overflow_remove +#define bin_split _getdns_bin_split +#define reclaim_space _getdns_reclaim_space +#define table_grow _getdns_table_grow +#define lru_front _getdns_lru_front +#define lru_remove _getdns_lru_remove +#define lruhash_status _getdns_lruhash_status +#define lruhash_get_mem _getdns_lruhash_get_mem +#define lruhash_traverse _getdns_lruhash_traverse + +#include "util/orig-headers/lruhash.h" +#endif diff --git a/src/util/orig-headers/locks.h b/src/util/orig-headers/locks.h new file mode 100644 index 00000000..d86ee492 --- /dev/null +++ b/src/util/orig-headers/locks.h @@ -0,0 +1,313 @@ +/** + * util/locks.h - unbound locking primitives + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UTIL_LOCKS_H +#define UTIL_LOCKS_H + +/** + * \file + * Locking primitives. + * If pthreads is available, these are used. + * If no locking exists, they do nothing. + * + * The idea is to have different sorts of locks for different tasks. + * This allows the locking code to be ported more easily. + * + * Types of locks that are supported. + * o lock_rw: lock that has many readers and one writer (to a data entry). + * o lock_basic: simple mutex. Blocking, one person has access only. + * This lock is meant for non performance sensitive uses. + * o lock_quick: speed lock. For performance sensitive locking of critical + * sections. Could be implemented by a mutex or a spinlock. + * + * Also thread creation and deletion functions are defined here. + */ + +/* if you define your own LOCKRET before including locks.h, you can get most + * locking functions without the dependency on log_err. */ +#ifndef LOCKRET +#include "util/log.h" +/** + * The following macro is used to check the return value of the + * pthread calls. They return 0 on success and an errno on error. + * The errno is logged to the logfile with a descriptive comment. + */ +#define LOCKRET(func) do {\ + int lockret_err; \ + if( (lockret_err=(func)) != 0) \ + log_err("%s at %d could not " #func ": %s", \ + __FILE__, __LINE__, strerror(lockret_err)); \ + } while(0) +#endif + +/** DEBUG: use thread debug whenever possible */ +#if defined(HAVE_PTHREAD) && defined(HAVE_PTHREAD_SPINLOCK_T) && defined(ENABLE_LOCK_CHECKS) +# define USE_THREAD_DEBUG +#endif + +#ifdef USE_THREAD_DEBUG +/******************* THREAD DEBUG ************************/ +/* (some) checking; to detect races and deadlocks. */ +#include "testcode/checklocks.h" + +#else /* USE_THREAD_DEBUG */ +#define lock_protect(lock, area, size) /* nop */ +#define lock_unprotect(lock, area) /* nop */ +#define lock_get_mem(lock) (0) /* nothing */ +#define checklock_start() /* nop */ +#define checklock_stop() /* nop */ + +#ifdef HAVE_PTHREAD +#include <pthread.h> + +/******************* PTHREAD ************************/ + +/** use pthread mutex for basic lock */ +typedef pthread_mutex_t lock_basic_type; +/** small front for pthread init func, NULL is default attrs.
*/ +#define lock_basic_init(lock) LOCKRET(pthread_mutex_init(lock, NULL)) +#define lock_basic_destroy(lock) LOCKRET(pthread_mutex_destroy(lock)) +#define lock_basic_lock(lock) LOCKRET(pthread_mutex_lock(lock)) +#define lock_basic_unlock(lock) LOCKRET(pthread_mutex_unlock(lock)) + +#ifndef HAVE_PTHREAD_RWLOCK_T +/** in case rwlocks are not supported, use a mutex. */ +typedef pthread_mutex_t lock_rw_type; +#define lock_rw_init(lock) LOCKRET(pthread_mutex_init(lock, NULL)) +#define lock_rw_destroy(lock) LOCKRET(pthread_mutex_destroy(lock)) +#define lock_rw_rdlock(lock) LOCKRET(pthread_mutex_lock(lock)) +#define lock_rw_wrlock(lock) LOCKRET(pthread_mutex_lock(lock)) +#define lock_rw_unlock(lock) LOCKRET(pthread_mutex_unlock(lock)) +#else /* HAVE_PTHREAD_RWLOCK_T */ +/** we use the pthread rwlock */ +typedef pthread_rwlock_t lock_rw_type; +/** small front for pthread init func, NULL is default attrs. */ +#define lock_rw_init(lock) LOCKRET(pthread_rwlock_init(lock, NULL)) +#define lock_rw_destroy(lock) LOCKRET(pthread_rwlock_destroy(lock)) +#define lock_rw_rdlock(lock) LOCKRET(pthread_rwlock_rdlock(lock)) +#define lock_rw_wrlock(lock) LOCKRET(pthread_rwlock_wrlock(lock)) +#define lock_rw_unlock(lock) LOCKRET(pthread_rwlock_unlock(lock)) +#endif /* HAVE_PTHREAD_RWLOCK_T */ + +#ifndef HAVE_PTHREAD_SPINLOCK_T +/** in case spinlocks are not supported, use a mutex. */ +typedef pthread_mutex_t lock_quick_type; +/** small front for pthread init func, NULL is default attrs. */ +#define lock_quick_init(lock) LOCKRET(pthread_mutex_init(lock, NULL)) +#define lock_quick_destroy(lock) LOCKRET(pthread_mutex_destroy(lock)) +#define lock_quick_lock(lock) LOCKRET(pthread_mutex_lock(lock)) +#define lock_quick_unlock(lock) LOCKRET(pthread_mutex_unlock(lock)) + +#else /* HAVE_PTHREAD_SPINLOCK_T */ +/** use pthread spinlock for the quick lock */ +typedef pthread_spinlock_t lock_quick_type; +/** + * allocate process private since this is available whether + * Thread Process-Shared Synchronization is supported or not. + * This means only threads inside this process may access the lock. + * (not threads from another process that shares memory). + * spinlocks are not supported on all pthread platforms. + */ +#define lock_quick_init(lock) LOCKRET(pthread_spin_init(lock, PTHREAD_PROCESS_PRIVATE)) +#define lock_quick_destroy(lock) LOCKRET(pthread_spin_destroy(lock)) +#define lock_quick_lock(lock) LOCKRET(pthread_spin_lock(lock)) +#define lock_quick_unlock(lock) LOCKRET(pthread_spin_unlock(lock)) + +#endif /* HAVE SPINLOCK */ + +/** Thread creation */ +typedef pthread_t ub_thread_type; +/** On alpine linux default thread stack size is 80 Kb. See +http://wiki.musl-libc.org/wiki/Functional_differences_from_glibc#Thread_stack_size +This is not enough and cause segfault. Other linux distros have 2 Mb at least. +Wrapper for set up thread stack size */ +#define PTHREADSTACKSIZE 2*1024*1024 +#define PTHREADCREATE(thr, stackrequired, func, arg) do {\ + pthread_attr_t attr; \ + size_t stacksize; \ + LOCKRET(pthread_attr_init(&attr)); \ + LOCKRET(pthread_attr_getstacksize(&attr, &stacksize)); \ + if (stacksize < stackrequired) { \ + LOCKRET(pthread_attr_setstacksize(&attr, stackrequired)); \ + LOCKRET(pthread_create(thr, &attr, func, arg)); \ + LOCKRET(pthread_attr_getstacksize(&attr, &stacksize)); \ + verbose(VERB_ALGO, "Thread stack size set to %u", (unsigned)stacksize); \ + } else {LOCKRET(pthread_create(thr, NULL, func, arg));} \ + } while(0) +/** Use wrapper for set thread stack size on attributes. 
*/ +#define ub_thread_create(thr, func, arg) PTHREADCREATE(thr, PTHREADSTACKSIZE, func, arg) +/** get self id. */ +#define ub_thread_self() pthread_self() +/** wait for another thread to terminate */ +#define ub_thread_join(thread) LOCKRET(pthread_join(thread, NULL)) +typedef pthread_key_t ub_thread_key_type; +#define ub_thread_key_create(key, f) LOCKRET(pthread_key_create(key, f)) +#define ub_thread_key_set(key, v) LOCKRET(pthread_setspecific(key, v)) +#define ub_thread_key_get(key) pthread_getspecific(key) + +#else /* we do not HAVE_PTHREAD */ +#ifdef HAVE_SOLARIS_THREADS + +/******************* SOLARIS THREADS ************************/ +#include <synch.h> +#include <thread.h> + +typedef rwlock_t lock_rw_type; +#define lock_rw_init(lock) LOCKRET(rwlock_init(lock, USYNC_THREAD, NULL)) +#define lock_rw_destroy(lock) LOCKRET(rwlock_destroy(lock)) +#define lock_rw_rdlock(lock) LOCKRET(rw_rdlock(lock)) +#define lock_rw_wrlock(lock) LOCKRET(rw_wrlock(lock)) +#define lock_rw_unlock(lock) LOCKRET(rw_unlock(lock)) + +/** use basic mutex */ +typedef mutex_t lock_basic_type; +#define lock_basic_init(lock) LOCKRET(mutex_init(lock, USYNC_THREAD, NULL)) +#define lock_basic_destroy(lock) LOCKRET(mutex_destroy(lock)) +#define lock_basic_lock(lock) LOCKRET(mutex_lock(lock)) +#define lock_basic_unlock(lock) LOCKRET(mutex_unlock(lock)) + +/** No spinlocks in solaris threads API. Use a mutex. */ +typedef mutex_t lock_quick_type; +#define lock_quick_init(lock) LOCKRET(mutex_init(lock, USYNC_THREAD, NULL)) +#define lock_quick_destroy(lock) LOCKRET(mutex_destroy(lock)) +#define lock_quick_lock(lock) LOCKRET(mutex_lock(lock)) +#define lock_quick_unlock(lock) LOCKRET(mutex_unlock(lock)) + +/** Thread creation, create a default thread. */ +typedef thread_t ub_thread_type; +#define ub_thread_create(thr, func, arg) LOCKRET(thr_create(NULL, NULL, func, arg, NULL, thr)) +#define ub_thread_self() thr_self() +#define ub_thread_join(thread) LOCKRET(thr_join(thread, NULL, NULL)) +typedef thread_key_t ub_thread_key_type; +#define ub_thread_key_create(key, f) LOCKRET(thr_keycreate(key, f)) +#define ub_thread_key_set(key, v) LOCKRET(thr_setspecific(key, v)) +void* ub_thread_key_get(ub_thread_key_type key); + + +#else /* we do not HAVE_SOLARIS_THREADS and no PTHREADS */ +/******************* WINDOWS THREADS ************************/ +#ifdef HAVE_WINDOWS_THREADS +#include <windows.h> + +/* Use a mutex */ +typedef LONG lock_rw_type; +#define lock_rw_init(lock) lock_basic_init(lock) +#define lock_rw_destroy(lock) lock_basic_destroy(lock) +#define lock_rw_rdlock(lock) lock_basic_lock(lock) +#define lock_rw_wrlock(lock) lock_basic_lock(lock) +#define lock_rw_unlock(lock) lock_basic_unlock(lock) + +/** the basic lock is a mutex, implemented opaquely, for error handling. */ +typedef LONG lock_basic_type; +void lock_basic_init(lock_basic_type* lock); +void lock_basic_destroy(lock_basic_type* lock); +void lock_basic_lock(lock_basic_type* lock); +void lock_basic_unlock(lock_basic_type* lock); + +/** on windows no spinlock, use mutex too. */ +typedef LONG lock_quick_type; +#define lock_quick_init(lock) lock_basic_init(lock) +#define lock_quick_destroy(lock) lock_basic_destroy(lock) +#define lock_quick_lock(lock) lock_basic_lock(lock) +#define lock_quick_unlock(lock) lock_basic_unlock(lock) + +/** Thread creation, create a default thread.
*/ +typedef HANDLE ub_thread_type; +void ub_thread_create(ub_thread_type* thr, void* (*func)(void*), void* arg); +ub_thread_type ub_thread_self(void); +void ub_thread_join(ub_thread_type thr); +typedef DWORD ub_thread_key_type; +void ub_thread_key_create(ub_thread_key_type* key, void* f); +void ub_thread_key_set(ub_thread_key_type key, void* v); +void* ub_thread_key_get(ub_thread_key_type key); + +#else /* we do not HAVE_SOLARIS_THREADS, PTHREADS or WINDOWS_THREADS */ + +/******************* NO THREADS ************************/ +#define THREADS_DISABLED 1 +/** In case there is no thread support, define locks to do nothing */ +typedef int lock_rw_type; +#define lock_rw_init(lock) /* nop */ +#define lock_rw_destroy(lock) /* nop */ +#define lock_rw_rdlock(lock) /* nop */ +#define lock_rw_wrlock(lock) /* nop */ +#define lock_rw_unlock(lock) /* nop */ + +/** define locks to do nothing */ +typedef int lock_basic_type; +#define lock_basic_init(lock) /* nop */ +#define lock_basic_destroy(lock) /* nop */ +#define lock_basic_lock(lock) /* nop */ +#define lock_basic_unlock(lock) /* nop */ + +/** define locks to do nothing */ +typedef int lock_quick_type; +#define lock_quick_init(lock) /* nop */ +#define lock_quick_destroy(lock) /* nop */ +#define lock_quick_lock(lock) /* nop */ +#define lock_quick_unlock(lock) /* nop */ + +/** Thread creation, threads do not exist */ +typedef pid_t ub_thread_type; +/** ub_thread_create is simulated with fork (extremely heavy threads, + * with no shared memory). */ +#define ub_thread_create(thr, func, arg) \ + ub_thr_fork_create(thr, func, arg) +#define ub_thread_self() getpid() +#define ub_thread_join(thread) ub_thr_fork_wait(thread) +void ub_thr_fork_wait(ub_thread_type thread); +void ub_thr_fork_create(ub_thread_type* thr, void* (*func)(void*), void* arg); +typedef void* ub_thread_key_type; +#define ub_thread_key_create(key, f) (*(key)) = NULL +#define ub_thread_key_set(key, v) (key) = (v) +#define ub_thread_key_get(key) (key) + +#endif /* HAVE_WINDOWS_THREADS */ +#endif /* HAVE_SOLARIS_THREADS */ +#endif /* HAVE_PTHREAD */ +#endif /* USE_THREAD_DEBUG */ + +/** + * Block all signals for this thread. + * fatal exit on error. + */ +void ub_thread_blocksigs(void); + +/** + * unblock one signal for this thread. + */ +void ub_thread_sig_unblock(int sig); + +#endif /* UTIL_LOCKS_H */ diff --git a/src/util/orig-headers/lookup3.h b/src/util/orig-headers/lookup3.h new file mode 100644 index 00000000..59dad7c4 --- /dev/null +++ b/src/util/orig-headers/lookup3.h @@ -0,0 +1,71 @@ +/* + * util/storage/lookup3.h - header file for hashing functions. + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. 
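For reference, this is how the locks.h wrappers above are meant to be used by the vendored code; a minimal sketch, not part of the patch, assuming the wrapper header path used in this tree. With pthreads the macros expand to real mutex and rwlock calls whose errors are reported through LOCKRET and log_err; in a build with no thread support they compile away, so the same code still builds single-threaded.

#include "config.h"
#include "util/locks.h"   /* wrapper path as vendored in this tree */

struct shared_counter {
    lock_rw_type lock;   /* many readers, one writer */
    long value;
};

static void counter_init(struct shared_counter* c)
{
    lock_rw_init(&c->lock);
    c->value = 0;
    lock_protect(&c->lock, c, sizeof(*c)); /* no-op unless lock checking is on */
}

static void counter_add(struct shared_counter* c, long n)
{
    lock_rw_wrlock(&c->lock);
    c->value += n;
    lock_rw_unlock(&c->lock);
}

static long counter_read(struct shared_counter* c)
{
    long v;
    lock_rw_rdlock(&c->lock);
    v = c->value;
    lock_rw_unlock(&c->lock);
    return v;
}

static void counter_free(struct shared_counter* c)
{
    lock_unprotect(&c->lock, c);
    lock_rw_destroy(&c->lock);
}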
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * \file + * + * This file contains header definitions for the hash functions we use. + * The hash functions are public domain (see lookup3.c). + */ + +#ifndef UTIL_STORAGE_LOOKUP3_H +#define UTIL_STORAGE_LOOKUP3_H + +/** + * Hash key made of 4byte chunks. + * @param k: the key, an array of uint32_t values + * @param length: the length of the key, in uint32_ts + * @param initval: the previous hash, or an arbitrary value + * @return: hash value. + */ +uint32_t hashword(const uint32_t *k, size_t length, uint32_t initval); + +/** + * Hash key data. + * @param k: the key, array of uint8_t + * @param length: the length of the key, in uint8_ts + * @param initval: the previous hash, or an arbitrary value + * @return: hash value. + */ +uint32_t hashlittle(const void *k, size_t length, uint32_t initval); + +/** + * Set the randomisation initial value, set this before threads start, + * and before hashing stuff (because it changes subsequent results). + * @param v: value + */ +void hash_set_raninit(uint32_t v); + +#endif /* UTIL_STORAGE_LOOKUP3_H */ diff --git a/src/util/orig-headers/lruhash.h b/src/util/orig-headers/lruhash.h new file mode 100644 index 00000000..c3937408 --- /dev/null +++ b/src/util/orig-headers/lruhash.h @@ -0,0 +1,414 @@ +/* + * util/storage/lruhash.h - hashtable, hash function, LRU keeping. + * + * Copyright (c) 2007, NLnet Labs. All rights reserved. + * + * This software is open source. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of the NLNET LABS nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
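A short, illustrative use of the lookup3.h declarations above, not part of the patch: hashing a byte string with hashlittle and a pair of 32-bit words with hashword. hash_set_raninit should be called once at startup with a random value, before any threads run and before anything is hashed, since it perturbs every later result. The helper names below are made up.

#include <stdint.h>
#include <string.h>
#include "config.h"
#include "util/lookup3.h"   /* wrapper path as vendored in this tree */

/* hash an arbitrary byte string, for example a name used as a table key */
static uint32_t name_hash(const char* name, uint32_t seed)
{
    return hashlittle(name, strlen(name), seed);
}

/* hash a small fixed-size key; hashword counts length in uint32_t units */
static uint32_t pair_hash(uint32_t a, uint32_t b)
{
    uint32_t k[2];
    k[0] = a;
    k[1] = b;
    return hashword(k, 2, 0);
}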
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * \file + * + * This file contains a hashtable with LRU keeping of entries. + * + * The hash table keeps a maximum memory size. Old entries are removed + * to make space for new entries. + * + * The locking strategy is as follows: + * o since (almost) every read also implies a LRU update, the + * hashtable lock is a spinlock, not rwlock. + * o the idea is to move every thread through the hash lock quickly, + * so that the next thread can access the lookup table. + * o User performs hash function. + * + * For read: + * o lock hashtable. + * o lookup hash bin. + * o lock hash bin. + * o find entry (if failed, unlock hash, unl bin, exit). + * o swizzle pointers for LRU update. + * o unlock hashtable. + * o lock entry (rwlock). + * o unlock hash bin. + * o work on entry. + * o unlock entry. + * + * To update an entry, gain writelock and change the entry. + * (the entry must keep the same hashvalue, so a data update.) + * (you cannot upgrade a readlock to a writelock, because the item may + * be deleted, it would cause race conditions. So instead, unlock and + * relookup it in the hashtable.) + * + * To delete an entry: + * o unlock the entry if you hold the lock already. + * o lock hashtable. + * o lookup hash bin. + * o lock hash bin. + * o find entry (if failed, unlock hash, unl bin, exit). + * o remove entry from hashtable bin overflow chain. + * o unlock hashtable. + * o lock entry (writelock). + * o unlock hash bin. + * o unlock entry (nobody else should be waiting for this lock, + * since you removed it from hashtable, and you got writelock while + * holding the hashbinlock so you are the only one.) + * Note you are only allowed to obtain a lock while holding hashbinlock. + * o delete entry. + * + * The above sequence is: + * o race free, works with read, write and delete. + * o but has a queue, imagine someone needing a writelock on an item. + * but there are still readlocks. The writelocker waits, but holds + * the hashbinlock. The next thread that comes in and needs the same + * hashbin will wait for the lock while holding the hashtable lock. + * thus halting the entire system on hashtable. + * This is because of the delete protection. + * Readlocks will be easier on the rwlock on entries. + * While the writer is holding writelock, similar problems happen with + * a reader or writer needing the same item. + * the scenario requires more than three threads. + * o so the queue length is 3 threads in a bad situation. The fourth is + * unable to use the hashtable. + * + * If you need to acquire locks on multiple items from the hashtable. + * o you MUST release all locks on items from the hashtable before + * doing the next lookup/insert/delete/whatever. + * o To acquire multiple items you should use a special routine that + * obtains the locks on those multiple items in one go. 
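From the caller's side the read, update and delete sequences described above collapse to very little code, because lruhash_lookup and lruhash_remove perform the table-lock, bin-lock and LRU steps internally. An illustrative sketch, not from the vendored sources; the cache_* helpers are made up.

#include "config.h"
#include "util/lruhash.h"

/* read: the entry comes back rdlocked; inspect it, then unlock it */
static int cache_peek(struct lruhash* table, hashvalue_type h, void* key)
{
    struct lruhash_entry* e = lruhash_lookup(table, h, key, 0);
    if(!e)
        return 0;   /* not present */
    /* ... look at e->data under the read lock ... */
    lock_rw_unlock(&e->lock);
    return 1;
}

/* update: same lookup with a write lock; hash and key must stay the same,
 * only the data may be changed in place */
static void cache_update(struct lruhash* table, hashvalue_type h, void* key)
{
    struct lruhash_entry* e = lruhash_lookup(table, h, key, 1);
    if(!e)
        return;
    /* ... modify e->data under the write lock ... */
    lock_rw_unlock(&e->lock);
}

/* delete: never while still holding the entry lock; lruhash_remove redoes
 * the lookup under the table and bin locks, as described above */
static void cache_drop(struct lruhash* table, hashvalue_type h, void* key)
{
    lruhash_remove(table, h, key);
}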
+ */ + +#ifndef UTIL_STORAGE_LRUHASH_H +#define UTIL_STORAGE_LRUHASH_H +#include "util/locks.h" +struct lruhash_bin; +struct lruhash_entry; + +/** default start size for hash arrays */ +#define HASH_DEFAULT_STARTARRAY 1024 /* entries in array */ +/** default max memory for hash arrays */ +#define HASH_DEFAULT_MAXMEM 4*1024*1024 /* bytes */ + +/** the type of a hash value */ +typedef uint32_t hashvalue_type; + +/** + * Type of function that calculates the size of an entry. + * Result must include the size of struct lruhash_entry. + * Keys that are identical must also calculate to the same size. + * size = func(key, data). + */ +typedef size_t (*lruhash_sizefunc_type)(void*, void*); + +/** type of function that compares two keys. return 0 if equal. */ +typedef int (*lruhash_compfunc_type)(void*, void*); + +/** old keys are deleted. + * The RRset type has to revoke its ID number, markdel() is used first. + * This function is called: func(key, userarg) */ +typedef void (*lruhash_delkeyfunc_type)(void*, void*); + +/** old data is deleted. This function is called: func(data, userarg). */ +typedef void (*lruhash_deldatafunc_type)(void*, void*); + +/** mark a key as pending to be deleted (and not to be used by anyone). + * called: func(key) */ +typedef void (*lruhash_markdelfunc_type)(void*); + +/** + * Hash table that keeps LRU list of entries. + */ +struct lruhash { + /** lock for exclusive access, to the lookup array */ + lock_quick_type lock; + /** the size function for entries in this table */ + lruhash_sizefunc_type sizefunc; + /** the compare function for entries in this table. */ + lruhash_compfunc_type compfunc; + /** how to delete keys. */ + lruhash_delkeyfunc_type delkeyfunc; + /** how to delete data. */ + lruhash_deldatafunc_type deldatafunc; + /** how to mark a key pending deletion */ + lruhash_markdelfunc_type markdelfunc; + /** user argument for user functions */ + void* cb_arg; + + /** the size of the lookup array */ + size_t size; + /** size bitmask - since size is a power of 2 */ + int size_mask; + /** lookup array of bins */ + struct lruhash_bin* array; + + /** the lru list, start and end, noncyclical double linked list. */ + struct lruhash_entry* lru_start; + /** lru list end item (least recently used) */ + struct lruhash_entry* lru_end; + + /** the number of entries in the hash table. */ + size_t num; + /** the amount of space used, roughly the number of bytes in use. */ + size_t space_used; + /** the amount of space the hash table is maximally allowed to use. */ + size_t space_max; +}; + +/** + * A single bin with a linked list of entries in it. + */ +struct lruhash_bin { + /** + * Lock for exclusive access to the linked list + * This lock makes deletion of items safe in this overflow list. + */ + lock_quick_type lock; + /** linked list of overflow entries */ + struct lruhash_entry* overflow_list; +}; + +/** + * An entry into the hash table. + * To change overflow_next you need to hold the bin lock. + * To change the lru items you need to hold the hashtable lock. + * This structure is designed as part of key struct. And key pointer helps + * to get the surrounding structure. Data should be allocated on its own. + */ +struct lruhash_entry { + /** + * rwlock for access to the contents of the entry + * Note that it does _not_ cover the lru_ and overflow_ ptrs. + * Even with a writelock, you cannot change hash and key. + * You need to delete it to change hash or key. + */ + lock_rw_type lock; + /** next entry in overflow chain. Covered by hashlock and binlock. 
*/ + struct lruhash_entry* overflow_next; + /** next entry in lru chain. covered by hashlock. */ + struct lruhash_entry* lru_next; + /** prev entry in lru chain. covered by hashlock. */ + struct lruhash_entry* lru_prev; + /** hash value of the key. It may not change, until entry deleted. */ + hashvalue_type hash; + /** key */ + void* key; + /** data */ + void* data; +}; + +/** + * Create new hash table. + * @param start_size: size of hashtable array at start, must be power of 2. + * @param maxmem: maximum amount of memory this table is allowed to use. + * @param sizefunc: calculates memory usage of entries. + * @param compfunc: compares entries, 0 on equality. + * @param delkeyfunc: deletes key. + * Calling both delkey and deldata will also free the struct lruhash_entry. + * Make it part of the key structure and delete it in delkeyfunc. + * @param deldatafunc: deletes data. + * @param arg: user argument that is passed to user function calls. + * @return: new hash table or NULL on malloc failure. + */ +struct lruhash* lruhash_create(size_t start_size, size_t maxmem, + lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc, + lruhash_delkeyfunc_type delkeyfunc, + lruhash_deldatafunc_type deldatafunc, void* arg); + +/** + * Delete hash table. Entries are all deleted. + * @param table: to delete. + */ +void lruhash_delete(struct lruhash* table); + +/** + * Clear hash table. Entries are all deleted, while locking them before + * doing so. At end the table is empty. + * @param table: to make empty. + */ +void lruhash_clear(struct lruhash* table); + +/** + * Insert a new element into the hashtable. + * If key is already present data pointer in that entry is updated. + * The space calculation function is called with the key, data. + * If necessary the least recently used entries are deleted to make space. + * If necessary the hash array is grown up. + * + * @param table: hash table. + * @param hash: hash value. User calculates the hash. + * @param entry: identifies the entry. + * If key already present, this entry->key is deleted immediately. + * But entry->data is set to NULL before deletion, and put into + * the existing entry. The data is then freed. + * @param data: the data. + * @param cb_override: if not null overrides the cb_arg for the deletefunc. + */ +void lruhash_insert(struct lruhash* table, hashvalue_type hash, + struct lruhash_entry* entry, void* data, void* cb_override); + +/** + * Lookup an entry in the hashtable. + * At the end of the function you hold a (read/write)lock on the entry. + * The LRU is updated for the entry (if found). + * @param table: hash table. + * @param hash: hash of key. + * @param key: what to look for, compared against entries in overflow chain. + * the hash value must be set, and must work with compare function. + * @param wr: set to true if you desire a writelock on the entry. + * with a writelock you can update the data part. + * @return: pointer to the entry or NULL. The entry is locked. + * The user must unlock the entry when done. + */ +struct lruhash_entry* lruhash_lookup(struct lruhash* table, + hashvalue_type hash, void* key, int wr); + +/** + * Touch entry, so it becomes the most recently used in the LRU list. + * Caller must hold hash table lock. The entry must be inserted already. + * @param table: hash table. + * @param entry: entry to make first in LRU. 
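Putting the calling convention documented above together: the entry is embedded in a caller-defined key struct, its rwlock is initialised and hash, key and data are filled in before lruhash_insert, and lruhash_lookup hands back an entry that is already locked. A hedged sketch, not part of the patch; demo_key and the helpers are made up, and the delete callbacks are whatever the table was created with.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "util/lruhash.h"
#include "util/lookup3.h"

struct demo_key {
    struct lruhash_entry entry;
    uint32_t id;
};

static void demo_store(struct lruhash* table, uint32_t id, const char* text)
{
    size_t len = strlen(text) + 1;
    struct demo_key* k = (struct demo_key*)calloc(1, sizeof(*k));
    char* d = (char*)malloc(len);
    if(!k || !d) {
        free(k);
        free(d);
        return;
    }
    memcpy(d, text, len);
    lock_rw_init(&k->entry.lock);   /* the entry lock must be initialised */
    k->id = id;
    k->entry.hash = hashword(&id, 1, 0);
    k->entry.key = k;
    k->entry.data = d;
    /* the table takes ownership; if the key already exists, k is deleted
     * and the existing entry receives the new data */
    lruhash_insert(table, k->entry.hash, &k->entry, d, NULL);
}

static int demo_fetch(struct lruhash* table, uint32_t id, char* out, size_t outlen)
{
    struct demo_key lk;             /* lookup key on the stack */
    struct lruhash_entry* e;
    memset(&lk, 0, sizeof(lk));
    lk.id = id;                     /* only the id is consulted by compfunc */
    e = lruhash_lookup(table, hashword(&id, 1, 0), &lk, 0 /* rdlock */);
    if(!e)
        return 0;
    snprintf(out, outlen, "%s", (char*)e->data);
    lock_rw_unlock(&e->lock);       /* the caller must unlock the entry */
    return 1;
}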
+ */ +void lru_touch(struct lruhash* table, struct lruhash_entry* entry); + +/** + * Set the markdelfunction (or NULL) + */ +void lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md); + +/************************* Internal functions ************************/ +/*** these are only exposed for unit tests. ***/ + +/** + * Remove entry from hashtable. Does nothing if not found in hashtable. + * Delfunc is called for the entry. + * @param table: hash table. + * @param hash: hash of key. + * @param key: what to look for. + */ +void lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key); + +/** init the hash bins for the table */ +void bin_init(struct lruhash_bin* array, size_t size); + +/** delete the hash bin and entries inside it */ +void bin_delete(struct lruhash* table, struct lruhash_bin* bin); + +/** + * Find entry in hash bin. You must have locked the bin. + * @param table: hash table with function pointers. + * @param bin: hash bin to look into. + * @param hash: hash value to look for. + * @param key: key to look for. + * @return: the entry or NULL if not found. + */ +struct lruhash_entry* bin_find_entry(struct lruhash* table, + struct lruhash_bin* bin, hashvalue_type hash, void* key); + +/** + * Remove entry from bin overflow chain. + * You must have locked the bin. + * @param bin: hash bin to look into. + * @param entry: entry ptr that needs removal. + */ +void bin_overflow_remove(struct lruhash_bin* bin, + struct lruhash_entry* entry); + +/** + * Split hash bin into two new ones. Based on increased size_mask. + * Caller must hold hash table lock. + * At the end the routine acquires all hashbin locks (in the old array). + * This makes it wait for other threads to finish with the bins. + * So the bins are ready to be deleted after this function. + * @param table: hash table with function pointers. + * @param newa: new increased array. + * @param newmask: new lookup mask. + */ +void bin_split(struct lruhash* table, struct lruhash_bin* newa, + int newmask); + +/** + * Try to make space available by deleting old entries. + * Assumes that the lock on the hashtable is being held by caller. + * Caller must not hold bin locks. + * @param table: hash table. + * @param list: list of entries that are to be deleted later. + * Entries have been removed from the hash table and writelock is held. + */ +void reclaim_space(struct lruhash* table, struct lruhash_entry** list); + +/** + * Grow the table lookup array. Becomes twice as large. + * Caller must hold the hash table lock. Must not hold any bin locks. + * Tries to grow, on malloc failure, nothing happened. + * @param table: hash table. + */ +void table_grow(struct lruhash* table); + +/** + * Put entry at front of lru. entry must be unlinked from lru. + * Caller must hold hash table lock. + * @param table: hash table with lru head and tail. + * @param entry: entry to make most recently used. + */ +void lru_front(struct lruhash* table, struct lruhash_entry* entry); + +/** + * Remove entry from lru list. + * Caller must hold hash table lock. + * @param table: hash table with lru head and tail. + * @param entry: entry to remove from lru. + */ +void lru_remove(struct lruhash* table, struct lruhash_entry* entry); + +/** + * Output debug info to the log as to state of the hash table. + * @param table: hash table. + * @param id: string printed with table to identify the hash table. + * @param extended: set to true to print statistics on overflow bin lengths. 
+ */ +void lruhash_status(struct lruhash* table, const char* id, int extended); + +/** + * Get memory in use now by the lruhash table. + * @param table: hash table. Will be locked before use. And unlocked after. + * @return size in bytes. + */ +size_t lruhash_get_mem(struct lruhash* table); + +/** + * Traverse a lruhash. Call back for every element in the table. + * @param h: hash table. Locked before use. + * @param wr: if true writelock is obtained on element, otherwise readlock. + * @param func: function for every element. Do not lock or unlock elements. + * @param arg: user argument to func. + */ +void lruhash_traverse(struct lruhash* h, int wr, + void (*func)(struct lruhash_entry*, void*), void* arg); + +#endif /* UTIL_STORAGE_LRUHASH_H */
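Finally, an illustrative sketch, not part of the patch, of the two inspection entry points declared above: lruhash_traverse walks every entry while holding its rwlock, so the callback must not lock or unlock anything itself, and lruhash_get_mem and lruhash_status report memory use and bin statistics.

#include <stdio.h>
#include "config.h"
#include "util/lruhash.h"

static void count_cb(struct lruhash_entry* e, void* arg)
{
    (void)e;                 /* the entry is already locked by the traverse */
    (*(size_t*)arg) += 1;
}

static void demo_report(struct lruhash* table)
{
    size_t n = 0;
    lruhash_traverse(table, 0 /* readlock entries */, count_cb, &n);
    printf("entries walked: %u, memory in use: %u bytes\n",
        (unsigned)n, (unsigned)lruhash_get_mem(table));
    /* lruhash_status(table, "demo", 1) logs a similar overview through
     * log_info(), including per-bin statistics when extended is set */
}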