PKG_NAME:=haproxy
PKG_VERSION:=1.8.13
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/
--- /dev/null
+commit ef9b56022c656df34044103a317b7b890ced6628
+Author: Willy Tarreau <w@1wt.eu>
+Date: Tue Aug 7 10:14:53 2018 +0200
+
+ BUG/MEDIUM: servers: check the queues once enabling a server
+
+ Commit 64cc49c ("MAJOR: servers: propagate server status changes
+ asynchronously.") heavily changed the way the server states are
+ updated since they became asynchronous. During this change, some
+ code was lost, which is used to shut down some sessions from a
+ backup server and to pick pending connections from a proxy once
+ a server is turned back from maintenance to ready state. The
+ effect is that when temporarily disabling a server, connections
+ stay in the backend's queue, and when re-enabling it, they are
+ not picked and they expire in the backend's queue. Now they're
+ properly picked again.
+
+ This fix must be backported to 1.8.
+
+ (cherry picked from commit 6a78e61694d69beb49c0e8486be9550f5e8b7d08)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/server.c b/src/server.c
+index 3d6a4093..fbed6cd4 100644
+--- a/src/server.c
++++ b/src/server.c
+@@ -4774,6 +4774,19 @@ void srv_update_status(struct server *s)
+ if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
+ set_backend_down(s->proxy);
+
++ /* If the server is set with "on-marked-up shutdown-backup-sessions",
++ * and it's not a backup server and its effective weight is > 0,
++ * then it can accept new connections, so we shut down all streams
++ * on all backup servers.
++ */
++ if ((s->onmarkedup & HANA_ONMARKEDUP_SHUTDOWNBACKUPSESSIONS) &&
++ !(s->flags & SRV_F_BACKUP) && s->next_eweight)
++ srv_shutdown_backup_streams(s->proxy, SF_ERR_UP);
++
++ /* check if we can handle some connections queued at the proxy. We
++ * will take as many as we can handle.
++ */
++ xferred = pendconn_grab_from_px(s);
+ }
+ else if (s->next_admin & SRV_ADMF_MAINT) {
+ /* remaining in maintenance mode, let's inform precisely about the
--- /dev/null
+commit 5550143cd6de58c6e733e389c6946e3dd26e89c0
+Author: Willy Tarreau <w@1wt.eu>
+Date: Tue Aug 7 10:44:58 2018 +0200
+
+ BUG/MEDIUM: queue: prevent a backup server from draining the proxy's connections
+
+ When switching back from a backup to an active server, the backup server
+ currently continues to drain the proxy's connections, which is a problem
+ because it's not expected to be able to pick them.
+
+ This patch ensures that a backup server will only pick backend connections
+ if there is no active server and it is the selected backup server or all
+ backup servers are supposed to be used.
+
+ This issue seems to have existed forever, so this fix should be backported
+ to all stable versions.
+
+ (cherry picked from commit a8694654ba021bf1e0e560a98ab5e70dc44d212e)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/queue.c b/src/queue.c
+index 1c730c75..b0b89426 100644
+--- a/src/queue.c
++++ b/src/queue.c
+@@ -117,7 +117,10 @@ static int pendconn_process_next_strm(struct server *srv, struct proxy *px)
+ }
+
+ ps_found:
+- if (srv_currently_usable(rsrv) && px->nbpend) {
++ if (srv_currently_usable(rsrv) && px->nbpend &&
++ (!(srv->flags & SRV_F_BACKUP) ||
++ (!px->srv_act &&
++ (srv == px->lbprm.fbck || (px->options & PR_O_USE_ALL_BK))))) {
+ struct pendconn *pp;
+
+ list_for_each_entry(pp, &px->pendconns, list) {
+@@ -287,6 +290,15 @@ int pendconn_grab_from_px(struct server *s)
+ if (!srv_currently_usable(s))
+ return 0;
+
++ /* if this is a backup server and there are active servers or at
++ * least another backup server was elected, then this one must
++ * not dequeue requests from the proxy.
++ */
++ if ((s->flags & SRV_F_BACKUP) &&
++ (s->proxy->srv_act ||
++ ((s != s->proxy->lbprm.fbck) && !(s->proxy->options & PR_O_USE_ALL_BK))))
++ return 0;
++
+ HA_SPIN_LOCK(PROXY_LOCK, &s->proxy->lock);
+ maxconn = srv_dynamic_maxconn(s);
+ list_for_each_entry_safe(p, pback, &s->proxy->pendconns, list) {
--- /dev/null
+commit 7d395954136c45e1533f355068399fec5e606db1
+Author: Baptiste Assmann <bedis9@gmail.com>
+Date: Fri Jun 22 13:03:50 2018 +0200
+
+ MINOR: dns: fix wrong score computation in dns_get_ip_from_response
+
+ dns_get_ip_from_response() is used to compare the caller current IP to
+ the IP available in the records returned by the DNS server.
+ A scoring system is in place to get the best IP address available.
+ That said, in the current implementation, there are a couple of issues:
+ 1. a comment does not match what the code does
+ 2. the code does not match what the comment says (score value is not
+ incremented with '2')
+
+ This patch fixes both issues.
+
+ Backport status: 1.8
+
+ (cherry picked from commit 84221b4e9010810cf93b7ad7a31d825fa9fc26bf)
+ [wt: Baptiste explicitly asked for this one to be backported to stable]
+ Cc: Baptiste <bedis9@gmail.com>
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/dns.c b/src/dns.c
+index 153a46b2..d8388ef1 100644
+--- a/src/dns.c
++++ b/src/dns.c
+@@ -1027,10 +1027,13 @@ int dns_get_ip_from_response(struct dns_response_packet *dns_p,
+ }
+
+ /* Check if the IP found in the record is already affected to a
+- * member of a group. If yes, the score should be incremented
++ * member of a group. If not, the score should be incremented
+ * by 2. */
+- if (owner && snr_check_ip_callback(owner, ip, &ip_type))
++ if (owner && snr_check_ip_callback(owner, ip, &ip_type)) {
+ continue;
++ } else {
++ score += 2;
++ }
+
+ /* Check for current ip matching. */
+ if (ip_type == currentip_sin_family &&
--- /dev/null
+commit c1bfcd002f54d1d84a99282d13f875c2649f3d70
+Author: Baptiste Assmann <bedis9@gmail.com>
+Date: Fri Jun 22 15:04:43 2018 +0200
+
+ MINOR: dns: new DNS options to allow/prevent IP address duplication
+
+ By default, HAProxy's DNS resolution at runtime ensure that there is no
+ IP address duplication in a backend (for servers being resolved by the
+ same hostname).
+ There are a few cases where people want, on purpose, to disable this
+ feature.
+
+ This patch introduces a couple of new server side options for this purpose:
+ "resolve-opts allow-dup-ip" or "resolve-opts prevent-dup-ip".
+
+ (cherry picked from commit 8e2d9430c0562ed74276d7f58e92706c384c0a36)
+
+ [wt: this is backported to 1.8 upon request from Baptiste because it offers
+ the option to revert to 1.7 behaviour, which some people depend on. The
+ address deduplication used on 1.8 apparently is not suited to everyone]
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/doc/configuration.txt b/doc/configuration.txt
+index 011533a0..1973bbf2 100644
+--- a/doc/configuration.txt
++++ b/doc/configuration.txt
+@@ -11623,6 +11623,40 @@ rise <count>
+ after <count> consecutive successful health checks. This value defaults to 2
+ if unspecified. See also the "check", "inter" and "fall" parameters.
+
++resolve-opts <option>,<option>,...
++ Comma separated list of options to apply to DNS resolution linked to this
++ server.
++
++ Available options:
++
++ * allow-dup-ip
++ By default, HAProxy prevents IP address duplication in a backend when DNS
++ resolution at runtime is in operation.
++ That said, for some cases, it makes sense that two servers (in the same
++ backend, being resolved by the same FQDN) have the same IP address.
++ For such case, simply enable this option.
++ This is the opposite of prevent-dup-ip.
++
++ * prevent-dup-ip
++ Ensure HAProxy's default behavior is enforced on a server: prevent re-using
++ an IP address already set to a server in the same backend and sharing the
++ same fqdn.
++ This is the opposite of allow-dup-ip.
++
++ Example:
++ backend b_myapp
++ default-server init-addr none resolvers dns
++ server s1 myapp.example.com:80 check resolve-opts allow-dup-ip
++ server s2 myapp.example.com:81 check resolve-opts allow-dup-ip
++
++ With the option allow-dup-ip set:
++ * if the nameserver returns a single IP address, then both servers will use
++ it
++ * If the nameserver returns 2 IP addresses, then each server will pick up a
++ different address
++
++ Default value: not set
++
+ resolve-prefer <family>
+ When DNS resolution is enabled for a server and multiple IP addresses from
+ different families are returned, HAProxy will prefer using an IP address
+diff --git a/include/types/dns.h b/include/types/dns.h
+index 9b1d08df..488d3996 100644
+--- a/include/types/dns.h
++++ b/include/types/dns.h
+@@ -245,6 +245,8 @@ struct dns_options {
+ } mask;
+ } pref_net[SRV_MAX_PREF_NET];
+ int pref_net_nb; /* The number of registered prefered networks. */
++ int accept_duplicate_ip; /* flag to indicate whether the associated object can use an IP address
++ already set to an other object of the same group */
+ };
+
+ /* Resolution structure associated to single server and used to manage name
+diff --git a/src/dns.c b/src/dns.c
+index d8388ef1..b31000a2 100644
+--- a/src/dns.c
++++ b/src/dns.c
+@@ -965,8 +965,10 @@ int dns_get_ip_from_response(struct dns_response_packet *dns_p,
+ int currentip_sel;
+ int j;
+ int score, max_score;
++ int allowed_duplicated_ip;
+
+ family_priority = dns_opts->family_prio;
++ allowed_duplicated_ip = dns_opts->accept_duplicate_ip;
+ *newip = newip4 = newip6 = NULL;
+ currentip_found = 0;
+ *newip_sin_family = AF_UNSPEC;
+@@ -1030,7 +1032,9 @@ int dns_get_ip_from_response(struct dns_response_packet *dns_p,
+ * member of a group. If not, the score should be incremented
+ * by 2. */
+ if (owner && snr_check_ip_callback(owner, ip, &ip_type)) {
+- continue;
++ if (!allowed_duplicated_ip) {
++ continue;
++ }
+ } else {
+ score += 2;
+ }
+diff --git a/src/server.c b/src/server.c
+index fbed6cd4..36a05e27 100644
+--- a/src/server.c
++++ b/src/server.c
+@@ -1506,6 +1506,7 @@ static void srv_settings_cpy(struct server *srv, struct server *src, int srv_tmp
+ if (src->resolvers_id != NULL)
+ srv->resolvers_id = strdup(src->resolvers_id);
+ srv->dns_opts.family_prio = src->dns_opts.family_prio;
++ srv->dns_opts.accept_duplicate_ip = src->dns_opts.accept_duplicate_ip;
+ if (srv->dns_opts.family_prio == AF_UNSPEC)
+ srv->dns_opts.family_prio = AF_INET6;
+ memcpy(srv->dns_opts.pref_net,
+@@ -2044,6 +2045,7 @@ int parse_server(const char *file, int linenum, char **args, struct proxy *curpr
+ newsrv = &curproxy->defsrv;
+ cur_arg = 1;
+ newsrv->dns_opts.family_prio = AF_INET6;
++ newsrv->dns_opts.accept_duplicate_ip = 0;
+ }
+
+ while (*args[cur_arg]) {
+@@ -2139,6 +2141,31 @@ int parse_server(const char *file, int linenum, char **args, struct proxy *curpr
+ newsrv->resolvers_id = strdup(args[cur_arg + 1]);
+ cur_arg += 2;
+ }
++ else if (!strcmp(args[cur_arg], "resolve-opts")) {
++ char *p, *end;
++
++ for (p = args[cur_arg + 1]; *p; p = end) {
++ /* cut on next comma */
++ for (end = p; *end && *end != ','; end++);
++ if (*end)
++ *(end++) = 0;
++
++ if (!strcmp(p, "allow-dup-ip")) {
++ newsrv->dns_opts.accept_duplicate_ip = 1;
++ }
++ else if (!strcmp(p, "prevent-dup-ip")) {
++ newsrv->dns_opts.accept_duplicate_ip = 0;
++ }
++ else {
++ ha_alert("parsing [%s:%d]: '%s' : unknown resolve-opts option '%s', supported methods are 'allow-dup-ip' and 'prevent-dup-ip'.\n",
++ file, linenum, args[cur_arg], p);
++ err_code |= ERR_ALERT | ERR_FATAL;
++ goto out;
++ }
++ }
++
++ cur_arg += 2;
++ }
+ else if (!strcmp(args[cur_arg], "resolve-prefer")) {
+ if (!strcmp(args[cur_arg + 1], "ipv4"))
+ newsrv->dns_opts.family_prio = AF_INET;
--- /dev/null
+commit d804e5e6b76bfd34576305ff33fe32aacb1fa5b7
+Author: Thierry FOURNIER <thierry.fournier@ozon.io>
+Date: Sat Jun 30 10:37:33 2018 +0200
+
+ BUG/MEDIUM: lua: possible CLOSE-WAIT state with '\n' headers
+
+ The Lua parser doesn't take into account end-of-headers containing
+ only '\n'. It always expects '\r\n'. If a '\n' is processed, the Lua
+ parser considers it to be missing 1 byte, and waits indefinitely for new data.
+
+ When the client reaches their timeout, it closes the connection.
+ This close is not detected and the connection keep in CLOSE-WAIT
+ state.
+
+ I guess that this patch fixes only a visible part of the problem.
+ If the Lua HTTP parser waits for data, the server timeout or the
+ connection closed by the client may stop the applet.
+
+ How to reproduce the problem:
+
+ HAProxy conf:
+
+ global
+ lua-load bug38.lua
+ frontend frt
+ timeout client 2s
+ timeout server 2s
+ mode http
+ bind *:8080
+ http-request use-service lua.donothing
+
+ Lua conf
+
+ core.register_service("donothing", "http", function(applet) end)
+
+ Client request:
+
+ echo -ne 'GET / HTTP/1.1\n\n' | nc 127.0.0.1 8080
+
+ Look for CLOSE-WAIT in the connection with "netstat" or "ss". I
+ use this script:
+
+ while sleep 1; do ss | grep CLOSE-WAIT; done
+
+ This patch must be backported in 1.6, 1.7 and 1.8
+
+ Workaround: enable the "hard-stop-after" directive, and perform
+ periodic reload.
+
+ (cherry picked from commit 70d318ccb760ee25f166a75d163f38545f074ff1)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/hlua.c b/src/hlua.c
+index 54064860..4e50fa64 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -6594,13 +6594,13 @@ static void hlua_applet_http_fct(struct appctx *ctx)
+ len2 = 0;
+ if (ret == 0)
+ len1 = 0;
+- if (len1 + len2 < strm->txn->req.eoh + 2) {
++ if (len1 + len2 < strm->txn->req.eoh + strm->txn->req.eol) {
+ si_applet_cant_get(si);
+ return;
+ }
+
+ /* skip the requests bytes. */
+- co_skip(si_oc(si), strm->txn->req.eoh + 2);
++ co_skip(si_oc(si), strm->txn->req.eoh + strm->txn->req.eol);
+ }
+
+ /* Executes The applet if it is not done. */
--- /dev/null
+commit cd753064396f9563640fef940ce2a89e192042b1
+Author: Olivier Houchard <ohouchard@haproxy.com>
+Date: Thu Dec 21 17:13:05 2017 +0100
+
+ MINOR: threads: Introduce double-width CAS on x86_64 and arm.
+
+ Introduce double-width compare-and-swap on arches that support it, right now
+ x86_64, arm, and aarch64.
+ Also introduce functions to do memory barriers.
+
+ (cherry picked from commit f61f0cb95ffbfe403219226d427cd292ca79965a)
+ [wt: this is backported only to have the barriers for the new rdv point]
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/include/common/hathreads.h b/include/common/hathreads.h
+index 25cadf10..543ab95c 100644
+--- a/include/common/hathreads.h
++++ b/include/common/hathreads.h
+@@ -98,6 +98,19 @@ extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the threa
+
+ #define ha_sigmask(how, set, oldset) sigprocmask(how, set, oldset)
+
++
++static inline void __ha_barrier_load(void)
++{
++}
++
++static inline void __ha_barrier_store(void)
++{
++}
++
++static inline void __ha_barrier_full(void)
++{
++}
++
+ #else /* USE_THREAD */
+
+ #include <stdio.h>
+@@ -694,8 +707,147 @@ static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
+
+ #endif /* DEBUG_THREAD */
+
++#ifdef __x86_64__
++#define HA_HAVE_CAS_DW 1
++#define HA_CAS_IS_8B
++static __inline int
++__ha_cas_dw(void *target, void *compare, const void *set)
++{
++ char ret;
++
++ __asm __volatile("lock cmpxchg16b %0; setz %3"
++ : "+m" (*(void **)target),
++ "=a" (((void **)compare)[0]),
++ "=d" (((void **)compare)[1]),
++ "=q" (ret)
++ : "a" (((void **)compare)[0]),
++ "d" (((void **)compare)[1]),
++ "b" (((const void **)set)[0]),
++ "c" (((const void **)set)[1])
++ : "memory", "cc");
++ return (ret);
++}
++
++static __inline void
++__ha_barrier_load(void)
++{
++ __asm __volatile("lfence" ::: "memory");
++}
++
++static __inline void
++__ha_barrier_store(void)
++{
++ __asm __volatile("sfence" ::: "memory");
++}
++
++static __inline void
++__ha_barrier_full(void)
++{
++ __asm __volatile("mfence" ::: "memory");
++}
++
++#elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))
++#define HA_HAVE_CAS_DW 1
++static __inline void
++__ha_barrier_load(void)
++{
++ __asm __volatile("dmb" ::: "memory");
++}
++
++static __inline void
++__ha_barrier_store(void)
++{
++ __asm __volatile("dsb" ::: "memory");
++}
++
++static __inline void
++__ha_barrier_full(void)
++{
++ __asm __volatile("dmb" ::: "memory");
++}
++
++static __inline int __ha_cas_dw(void *target, void *compare, void *set)
++{
++ uint64_t previous;
++ int tmp;
++
++ __asm __volatile("1:"
++ "ldrexd %0, [%4];"
++ "cmp %Q0, %Q2;"
++ "ittt eq;"
++ "cmpeq %R0, %R2;"
++ "strexdeq %1, %3, [%4];"
++ "cmpeq %1, #1;"
++ "beq 1b;"
++ : "=&r" (previous), "=&r" (tmp)
++ : "r" (compare), "r" (set), "r" (target)
++ : "memory", "cc");
++ tmp = (previous == *(uint64_t *)compare);
++ *(uint64_t *)compare = previous;
++ return (tmp);
++}
++
++#elif defined (__aarch64__)
++#define HA_HAVE_CAS_DW 1
++#define HA_CAS_IS_8B
++
++static __inline void
++__ha_barrier_load(void)
++{
++ __asm __volatile("dmb ishld" ::: "memory");
++}
++
++static __inline void
++__ha_barrier_store(void)
++{
++ __asm __volatile("dmb ishst" ::: "memory");
++}
++
++static __inline void
++__ha_barrier_full(void)
++{
++ __asm __volatile("dmb ish" ::: "memory");
++}
++
++static __inline int __ha_cas_dw(void *target, void *compare, void *set)
++{
++ void *value[2];
++ uint64_t tmp1, tmp2;
++
++ __asm__ __volatile__("1:"
++ "ldxp %0, %1, [%4];"
++ "mov %2, %0;"
++ "mov %3, %1;"
++ "eor %0, %0, %5;"
++ "eor %1, %1, %6;"
++ "orr %1, %0, %1;"
++ "mov %w0, #0;"
++ "cbnz %1, 2f;"
++ "stxp %w0, %7, %8, [%4];"
++ "cbnz %w0, 1b;"
++ "mov %w0, #1;"
++ "2:"
++ : "=&r" (tmp1), "=&r" (tmp2), "=&r" (value[0]), "=&r" (value[1])
++ : "r" (target), "r" (((void **)(compare))[0]), "r" (((void **)(compare))[1]), "r" (((void **)(set))[0]), "r" (((void **)(set))[1])
++ : "cc", "memory");
++
++ memcpy(compare, &value, sizeof(value));
++ return (tmp1);
++}
++
++#else
++#define __ha_barrier_load __sync_synchronize
++#define __ha_barrier_store __sync_synchronize
++#define __ha_barrier_full __sync_synchronize
++#endif
++
+ #endif /* USE_THREAD */
+
++static inline void __ha_compiler_barrier(void)
++{
++ __asm __volatile("" ::: "memory");
++}
++
+ /* Dummy I/O handler used by the sync pipe.*/
+ void thread_sync_io_handler(int fd);
+ int parse_nbthread(const char *arg, char **err);
--- /dev/null
+commit ad84851746243d85f9be59703e9bee0f5c5f8eba
+Author: Willy Tarreau <w@1wt.eu>
+Date: Wed Feb 14 14:16:28 2018 +0100
+
+ BUG/MEDIUM: threads: fix the double CAS implementation for ARMv7
+
+ Commit f61f0cb ("MINOR: threads: Introduce double-width CAS on x86_64
+ and arm.") introduced the double CAS. But the ARMv7 version is bogus,
+ it uses the value of the pointers instead of dereferencing them. When
+ lucky, it simply doesn't build due to impossible registers combinations.
+ Otherwise it will immediately crash at run time when facing traffic.
+
+ No backport is needed, this bug was introduced in 1.9-dev.
+
+ (cherry picked from commit 41ccb194d1d14669e0592e5373ef5776f099e82a)
+ [wt: backported only to keep safe code eventhough we don't use
+ this function in 1.8]
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/include/common/hathreads.h b/include/common/hathreads.h
+index 543ab95c..4e72848e 100644
+--- a/include/common/hathreads.h
++++ b/include/common/hathreads.h
+@@ -766,7 +766,7 @@ __ha_barrier_full(void)
+ __asm __volatile("dmb" ::: "memory");
+ }
+
+-static __inline int __ha_cas_dw(void *target, void *compare, void *set)
++static __inline int __ha_cas_dw(void *target, void *compare, const void *set)
+ {
+ uint64_t previous;
+ int tmp;
+@@ -780,7 +780,7 @@ static __inline int __ha_cas_dw(void *target, void *compare, void *set)
+ "cmpeq %1, #1;"
+ "beq 1b;"
+ : "=&r" (previous), "=&r" (tmp)
+- : "r" (compare), "r" (set), "r" (target)
++ : "r" (*(uint64_t *)compare), "r" (*(uint64_t *)set), "r" (target)
+ : "memory", "cc");
+ tmp = (previous == *(uint64_t *)compare);
+ *(uint64_t *)compare = previous;
--- /dev/null
+commit ece550d98e1c10017fb91ecfa0d19ae9d2dc45da
+Author: Willy Tarreau <w@1wt.eu>
+Date: Wed Aug 1 19:12:20 2018 +0200
+
+ MINOR: threads: add more consistency between certain variables in no-thread case
+
+ When threads are disabled, some variables such as tid and tid_bit are
+ still checked everywhere, the MAX_THREADS_MASK macro is ~0UL while
+ MAX_THREADS is 1, and the all_threads_mask variable is replaced with a
+ macro forced to zero. The compiler cannot optimize away all this code
+ involving checks on tid and tid_bit, and we end up in special cases
+ where all_threads_mask has to be specifically tested for being zero or
+ not. It is not even certain the code paths are always equivalent when
+ testing without threads and with nbthread 1.
+
+ Let's change this to make sure we always present a single thread when
+ threads are disabled, and have the relevant values declared as constants
+ so that the compiler can optimize all the tests away. Now we have
+ MAX_THREADS_MASK set to 1, all_threads_mask set to 1, tid set to zero
+ and tid_bit set to 1. Doing just this has removed 4 kB of code in the
+ no-thread case.
+
+ A few checks for all_threads_mask==0 have been removed since it never
+ happens anymore.
+
+ (cherry picked from commit 0c026f49e7348bce5b3c74be896ae208ae6e26a4)
+ [wt: the thread code feels safer with this, especially with the small updates
+ needed for the rdv point; missed one occurrence fixed by next patch]
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/include/common/hathreads.h b/include/common/hathreads.h
+index 4e72848e..7eb5d127 100644
+--- a/include/common/hathreads.h
++++ b/include/common/hathreads.h
+@@ -24,10 +24,6 @@
+
+ #include <common/config.h>
+
+-#define MAX_THREADS_MASK ((unsigned long)-1)
+-extern THREAD_LOCAL unsigned int tid; /* The thread id */
+-extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
+-
+ /* Note about all_threads_mask :
+ * - with threads support disabled, this symbol is defined as zero (0UL).
+ * - with threads enabled, this variable is never zero, it contains the mask
+@@ -37,7 +33,14 @@ extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the threa
+ #ifndef USE_THREAD
+
+ #define MAX_THREADS 1
+-#define all_threads_mask 0UL
++#define MAX_THREADS_MASK 1
++
++/* Only way found to replace variables with constants that are optimized away
++ * at build time.
++ */
++enum { all_threads_mask = 1UL };
++enum { tid_bit = 1UL };
++enum { tid = 0 };
+
+ #define __decl_hathreads(decl)
+
+@@ -98,6 +101,9 @@ extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the threa
+
+ #define ha_sigmask(how, set, oldset) sigprocmask(how, set, oldset)
+
++static inline void ha_set_tid(unsigned int tid)
++{
++}
+
+ static inline void __ha_barrier_load(void)
+ {
+@@ -120,6 +126,7 @@ static inline void __ha_barrier_full(void)
+ #include <import/plock.h>
+
+ #define MAX_THREADS LONGBITS
++#define MAX_THREADS_MASK ((unsigned long)-1)
+
+ #define __decl_hathreads(decl) decl
+
+@@ -223,10 +230,19 @@ void thread_exit_sync(void);
+ int thread_no_sync(void);
+ int thread_need_sync(void);
+
++extern THREAD_LOCAL unsigned int tid; /* The thread id */
++extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
+ extern volatile unsigned long all_threads_mask;
+
+ #define ha_sigmask(how, set, oldset) pthread_sigmask(how, set, oldset)
+
++/* sets the thread ID and the TID bit for the current thread */
++static inline void ha_set_tid(unsigned int data)
++{
++ tid = data;
++ tid_bit = (1UL << tid);
++}
++
+
+ #if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
+
+diff --git a/src/cfgparse.c b/src/cfgparse.c
+index 24349a59..d1474d4b 100644
+--- a/src/cfgparse.c
++++ b/src/cfgparse.c
+@@ -7652,11 +7652,11 @@ int check_config_validity()
+ nbproc = my_ffsl(bind_conf->bind_proc);
+
+ mask = bind_conf->bind_thread[nbproc - 1];
+- if (mask && !(mask & (all_threads_mask ? all_threads_mask : 1UL))) {
++ if (mask && !(mask & all_threads_mask)) {
+ unsigned long new_mask = 0;
+
+ while (mask) {
+- new_mask |= mask & (all_threads_mask ? all_threads_mask : 1UL);
++ new_mask |= mask & all_threads_mask;
+ mask >>= global.nbthread;
+ }
+
+diff --git a/src/haproxy.c b/src/haproxy.c
+index 9ba56623..e0186ff9 100644
+--- a/src/haproxy.c
++++ b/src/haproxy.c
+@@ -2448,8 +2448,7 @@ static void *run_thread_poll_loop(void *data)
+ struct per_thread_deinit_fct *ptdf;
+ __decl_hathreads(static HA_SPINLOCK_T start_lock);
+
+- tid = *((unsigned int *)data);
+- tid_bit = (1UL << tid);
++ ha_set_tid(*((unsigned int *)data));
+ tv_update_date(-1,-1);
+
+ list_for_each_entry(ptif, &per_thread_init_list, list) {
+diff --git a/src/hathreads.c b/src/hathreads.c
+index 0d0a0509..238cbb80 100644
+--- a/src/hathreads.c
++++ b/src/hathreads.c
+@@ -19,8 +19,6 @@
+ #include <common/standard.h>
+ #include <proto/fd.h>
+
+-THREAD_LOCAL unsigned int tid = 0;
+-THREAD_LOCAL unsigned long tid_bit = (1UL << 0);
+
+ /* Dummy I/O handler used by the sync pipe.*/
+ void thread_sync_io_handler(int fd)
+@@ -33,6 +31,9 @@ static HA_SPINLOCK_T sync_lock;
+ static int threads_sync_pipe[2];
+ static unsigned long threads_want_sync = 0;
+ volatile unsigned long all_threads_mask = 1; // nbthread 1 assumed by default
++THREAD_LOCAL unsigned int tid = 0;
++THREAD_LOCAL unsigned long tid_bit = (1UL << 0);
++
+
+ #if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
+ struct lock_stat lock_stats[LOCK_LABELS];
+@@ -130,7 +131,7 @@ void thread_enter_sync()
+ {
+ static volatile unsigned long barrier = 0;
+
+- if (!all_threads_mask)
++ if (!(all_threads_mask & (all_threads_mask - 1)))
+ return;
+
+ thread_sync_barrier(&barrier);
+@@ -146,7 +147,7 @@ void thread_exit_sync()
+ {
+ static volatile unsigned long barrier = 0;
+
+- if (!all_threads_mask)
++ if (!(all_threads_mask & (all_threads_mask - 1)))
+ return;
+
+ if (threads_want_sync & tid_bit)
--- /dev/null
+commit 7607ec0917c33ce511d46b791dfa5550451dd538
+Author: Willy Tarreau <w@1wt.eu>
+Date: Tue Aug 7 10:07:15 2018 +0200
+
+ BUG/MEDIUM: threads: fix the no-thread case after the change to the sync point
+
+ In commit 0c026f4 ("MINOR: threads: add more consistency between certain
+ variables in no-thread case"), we ensured that we don't have all_threads_mask
+ zeroed anymore. But one test was missed for the write() to the sync pipe.
+ This results in a situation where when running single-threaded, once a
+ server status changes, a wake-up message is written to the pipe and never
+ consumed, showing a 100% CPU usage.
+
+ No backport is needed.
+ (cherry picked from commit ab657ce2511c4e19b0191fbe1c98cfd823a3c5d6)
+ [wt: the offending patch was just backported as the previous one]
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/hathreads.c b/src/hathreads.c
+index 238cbb80..ba05fe27 100644
+--- a/src/hathreads.c
++++ b/src/hathreads.c
+@@ -71,7 +71,7 @@ void thread_sync_enable(void)
+ */
+ void thread_want_sync()
+ {
+- if (all_threads_mask) {
++ if (all_threads_mask & (all_threads_mask - 1)) {
+ if (threads_want_sync & tid_bit)
+ return;
+ if (HA_ATOMIC_OR(&threads_want_sync, tid_bit) == tid_bit)
--- /dev/null
+commit b505a8d719c208073959eff07f4af202ef49a8a1
+Author: Willy Tarreau <w@1wt.eu>
+Date: Thu Aug 2 10:16:17 2018 +0200
+
+ MEDIUM: hathreads: implement a more flexible rendez-vous point
+
+ The current synchronization point enforces certain restrictions which
+ are hard to workaround in certain areas of the code. The fact that the
+ critical code can only be called from the sync point itself is a problem
+ for some callback-driven parts. The "show fd" command for example is
+ fragile regarding this.
+
+ Also it is expensive in terms of CPU usage because it wakes every other
+ thread just to be sure all of them join to the rendez-vous point. It's a
+ problem because the sleeping threads would not need to be woken up just
+ to know they're doing nothing.
+
+ Here we implement a different approach. We keep track of harmless threads,
+ which are defined as those either doing nothing, or doing harmless things.
+ The rendez-vous is used "for others" as a way for a thread to isolate itself.
+ A thread then requests to be alone using thread_isolate() when approaching
+ the dangerous area, and then waits until all other threads are either doing
+ the same or are doing something harmless (typically polling). The function
+ only returns once the thread is guaranteed to be alone, and the critical
+ section is terminated using thread_release().
+
+ (cherry picked from commit 60b639ccbe919b86790267d7e45a39b75434acbe)
+ [wt: this will be needed to fix the "show fd" command with threads]
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/include/common/hathreads.h b/include/common/hathreads.h
+index 7eb5d127..f8fda87a 100644
+--- a/include/common/hathreads.h
++++ b/include/common/hathreads.h
+@@ -117,6 +117,27 @@ static inline void __ha_barrier_full(void)
+ {
+ }
+
++static inline void thread_harmless_now()
++{
++}
++
++static inline void thread_harmless_end()
++{
++}
++
++static inline void thread_isolate()
++{
++}
++
++static inline void thread_release()
++{
++}
++
++static inline unsigned long thread_isolated()
++{
++ return 1;
++}
++
+ #else /* USE_THREAD */
+
+ #include <stdio.h>
+@@ -229,10 +250,34 @@ void thread_enter_sync(void);
+ void thread_exit_sync(void);
+ int thread_no_sync(void);
+ int thread_need_sync(void);
++void thread_harmless_till_end();
++void thread_isolate();
++void thread_release();
+
+ extern THREAD_LOCAL unsigned int tid; /* The thread id */
+ extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
+ extern volatile unsigned long all_threads_mask;
++extern volatile unsigned long threads_want_rdv_mask;
++extern volatile unsigned long threads_harmless_mask;
++
++/* explanation for threads_want_rdv_mask and threads_harmless_mask :
++ * - threads_want_rdv_mask is a bit field indicating all threads that have
++ * requested a rendez-vous of other threads using thread_isolate().
++ * - threads_harmless_mask is a bit field indicating all threads that are
++ * currently harmless in that they promise not to access a shared resource.
++ *
++ * For a given thread, its bits in want_rdv and harmless can be translated like
++ * this :
++ *
++ * ----------+----------+----------------------------------------------------
++ * want_rdv | harmless | description
++ * ----------+----------+----------------------------------------------------
++ * 0 | 0 | thread not interested in RDV, possibly harmful
++ * 0 | 1 | thread not interested in RDV but harmless
++ * 1 | 1 | thread interested in RDV and waiting for its turn
++ * 1 | 0 | thread currently working isolated from others
++ * ----------+----------+----------------------------------------------------
++ */
+
+ #define ha_sigmask(how, set, oldset) pthread_sigmask(how, set, oldset)
+
+@@ -243,6 +288,38 @@ static inline void ha_set_tid(unsigned int data)
+ tid_bit = (1UL << tid);
+ }
+
++/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
++ * not be touching any unprotected shared resource during this period. Usually
++ * this is called before poll(), but it may also be placed around very slow
++ * calls (eg: some crypto operations). Needs to be terminated using
++ * thread_harmless_end().
++ */
++static inline void thread_harmless_now()
++{
++ HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
++}
++
++/* Ends the harmless period started by thread_harmless_now(). Usually this is
++ * placed after the poll() call. If it is discovered that a job was running and
++ * is relying on the thread still being harmless, the thread waits for the
++ * other one to finish.
++ */
++static inline void thread_harmless_end()
++{
++ while (1) {
++ HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
++ if (likely((threads_want_rdv_mask & all_threads_mask) == 0))
++ break;
++ thread_harmless_till_end();
++ }
++}
++
++/* an isolated thread has harmless cleared and want_rdv set */
++static inline unsigned long thread_isolated()
++{
++ return threads_want_rdv_mask & ~threads_harmless_mask & tid_bit;
++}
++
+
+ #if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
+
+diff --git a/src/ev_epoll.c b/src/ev_epoll.c
+index adc15acd..09d1abb6 100644
+--- a/src/ev_epoll.c
++++ b/src/ev_epoll.c
+@@ -17,6 +17,7 @@
+ #include <common/config.h>
+ #include <common/debug.h>
+ #include <common/epoll.h>
++#include <common/hathreads.h>
+ #include <common/standard.h>
+ #include <common/ticks.h>
+ #include <common/time.h>
+@@ -153,6 +154,8 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+ }
+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock);
+
++ thread_harmless_now();
++
+ /* compute the epoll_wait() timeout */
+ if (!exp)
+ wait_time = MAX_DELAY_MS;
+@@ -173,6 +176,8 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+ tv_update_date(wait_time, status);
+ measure_idle();
+
++ thread_harmless_end();
++
+ /* process polled events */
+
+ for (count = 0; count < status; count++) {
+diff --git a/src/ev_kqueue.c b/src/ev_kqueue.c
+index 642de8b3..1f4762e6 100644
+--- a/src/ev_kqueue.c
++++ b/src/ev_kqueue.c
+@@ -19,6 +19,7 @@
+
+ #include <common/compat.h>
+ #include <common/config.h>
++#include <common/hathreads.h>
+ #include <common/ticks.h>
+ #include <common/time.h>
+ #include <common/tools.h>
+@@ -127,6 +128,8 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+ }
+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock);
+
++ thread_harmless_now();
++
+ if (changes) {
+ #ifdef EV_RECEIPT
+ kev[0].flags |= EV_RECEIPT;
+@@ -169,6 +172,8 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+ tv_update_date(delta_ms, status);
+ measure_idle();
+
++ thread_harmless_end();
++
+ for (count = 0; count < status; count++) {
+ unsigned int n = 0;
+ fd = kev[count].ident;
+diff --git a/src/ev_poll.c b/src/ev_poll.c
+index c913ced2..7da992d6 100644
+--- a/src/ev_poll.c
++++ b/src/ev_poll.c
+@@ -19,6 +19,7 @@
+
+ #include <common/compat.h>
+ #include <common/config.h>
++#include <common/hathreads.h>
+ #include <common/ticks.h>
+ #include <common/time.h>
+
+@@ -149,6 +150,9 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+
+ }
+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock);
++
++ thread_harmless_now();
++
+ fd_nbupdt = 0;
+
+ nbfd = 0;
+@@ -200,6 +204,8 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+ tv_update_date(wait_time, status);
+ measure_idle();
+
++ thread_harmless_end();
++
+ for (count = 0; status > 0 && count < nbfd; count++) {
+ unsigned int n;
+ int e = poll_events[count].revents;
+diff --git a/src/ev_select.c b/src/ev_select.c
+index bde923ea..9daf74d9 100644
+--- a/src/ev_select.c
++++ b/src/ev_select.c
+@@ -16,6 +16,7 @@
+
+ #include <common/compat.h>
+ #include <common/config.h>
++#include <common/hathreads.h>
+ #include <common/ticks.h>
+ #include <common/time.h>
+
+@@ -123,6 +124,9 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+
+ }
+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock);
++
++ thread_harmless_now();
++
+ fd_nbupdt = 0;
+
+ /* let's restore fdset state */
+@@ -171,6 +175,8 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+ tv_update_date(delta_ms, status);
+ measure_idle();
+
++ thread_harmless_end();
++
+ if (status <= 0)
+ return;
+
+diff --git a/src/hathreads.c b/src/hathreads.c
+index ba05fe27..97ed31c5 100644
+--- a/src/hathreads.c
++++ b/src/hathreads.c
+@@ -30,6 +30,8 @@ void thread_sync_io_handler(int fd)
+ static HA_SPINLOCK_T sync_lock;
+ static int threads_sync_pipe[2];
+ static unsigned long threads_want_sync = 0;
++volatile unsigned long threads_want_rdv_mask = 0;
++volatile unsigned long threads_harmless_mask = 0;
+ volatile unsigned long all_threads_mask = 1; // nbthread 1 assumed by default
+ THREAD_LOCAL unsigned int tid = 0;
+ THREAD_LOCAL unsigned long tid_bit = (1UL << 0);
+@@ -163,6 +165,68 @@ void thread_exit_sync()
+ thread_sync_barrier(&barrier);
+ }
+
++/* Marks the thread as harmless until the last thread using the rendez-vous
++ * point quits. Given that we can wait for a long time, sched_yield() is used
++ * when available to offer the CPU resources to competing threads if needed.
++ */
++void thread_harmless_till_end()
++{
++ HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
++ while (threads_want_rdv_mask & all_threads_mask) {
++#if _POSIX_PRIORITY_SCHEDULING
++ sched_yield();
++#else
++ pl_cpu_relax();
++#endif
++ }
++}
++
++/* Isolates the current thread : request the ability to work while all other
++ * threads are harmless. Only returns once all of them are harmless, with the
++ * current thread's bit in threads_harmless_mask cleared. Needs to be completed
++ * using thread_release().
++ */
++void thread_isolate()
++{
++ unsigned long old;
++
++ HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
++ __ha_barrier_store();
++ HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);
++
++ /* wait for all threads to become harmless */
++ old = threads_harmless_mask;
++ while (1) {
++ if (unlikely((old & all_threads_mask) != all_threads_mask))
++ old = threads_harmless_mask;
++ else if (HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
++ break;
++
++#if _POSIX_PRIORITY_SCHEDULING
++ sched_yield();
++#else
++ pl_cpu_relax();
++#endif
++ }
++ /* one thread gets released at a time here, with its harmess bit off.
++ * The loss of this bit makes the other one continue to spin while the
++ * thread is working alone.
++ */
++}
++
++/* Cancels the effect of thread_isolate() by releasing the current thread's bit
++ * in threads_want_rdv_mask and by marking this thread as harmless until the
++ * last worker finishes.
++ */
++void thread_release()
++{
++ while (1) {
++ HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
++ if (!(threads_want_rdv_mask & all_threads_mask))
++ break;
++ thread_harmless_till_end();
++ }
++}
+
+ __attribute__((constructor))
+ static void __hathreads_init(void)
--- /dev/null
+commit f41ca2546e3c35cc389f45428341ec03dade314d
+Author: Willy Tarreau <w@1wt.eu>
+Date: Thu Aug 2 11:05:48 2018 +0200
+
+ BUG/MEDIUM: cli: make "show fd" thread-safe
+
+ The "show fd" command was implemented as a debugging aid but it's not
+ thread safe. Its features have grown, it can now dump some mux-specific
+ parts and is being used in production to capture some useful debugging
+ traces. But it will quickly crash the process when used during an H2 load
+ test for example, especially when haproxy is built with the DEBUG_UAF
+ option. It cannot afford not to be thread safe anymore. Let's make use
+ of the new rendez-vous point using thread_isolate() / thread_release()
+ to ensure that the data being dumped are not changing under us. The dump
+ becomes slightly slower under load but now it's safe.
+
+ This should be backported to 1.8 along with the rendez-vous point code
+ once considered stable enough.
+ (cherry picked from commit bf9fd650883b23604b7cd4aabf04fc0c4c8fe7c7)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/cli.c b/src/cli.c
+index 233c2323..8344fe10 100644
+--- a/src/cli.c
++++ b/src/cli.c
+@@ -787,10 +787,14 @@ static int cli_io_handler_show_fd(struct appctx *appctx)
+ void *ctx = NULL;
+ uint32_t conn_flags = 0;
+
++ thread_isolate();
++
+ fdt = fdtab[fd];
+
+- if (!fdt.owner)
++ if (!fdt.owner) {
++ thread_release();
+ goto skip; // closed
++ }
+
+ if (fdt.iocb == conn_fd_handler) {
+ conn_flags = ((struct connection *)fdt.owner)->flags;
+@@ -855,6 +859,8 @@ static int cli_io_handler_show_fd(struct appctx *appctx)
+ li->bind_conf->frontend->id);
+ }
+
++ thread_release();
++
+ chunk_appendf(&trash, "\n");
+
+ if (ci_putchk(si_ic(si), &trash) == -1) {
--- /dev/null
+commit 8c2e3b35a951c41b80efe4c3368d1244bab2bea4
+Author: Emeric Brun <ebrun@haproxy.com>
+Date: Thu Aug 16 11:36:40 2018 +0200
+
+ BUG/MINOR: ssl: empty connections reported as errors.
+
+ Empty connection is reported as handshake error
+ even if dont-log-null is specified.
+
+ This bug is a regression due to:
+
+ BUILD: ssl: fix to build (again) with boringssl
+
+ New openssl 1.1.1 defines OPENSSL_NO_HEARTBEATS as boring ssl
+ so the test was replaced by OPENSSL_IS_BORINGSSL
+
+ This fix should be backported to 1.8
+
+ (cherry picked from commit 77e8919fc6f382f3a7facdc814b8618b8987200f)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/ssl_sock.c b/src/ssl_sock.c
+index 7edfb799..49389f01 100644
+--- a/src/ssl_sock.c
++++ b/src/ssl_sock.c
+@@ -5121,7 +5121,7 @@ int ssl_sock_handshake(struct connection *conn, unsigned int flag)
+ if (!errno && conn->flags & CO_FL_WAIT_L4_CONN)
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+ if (!conn->err_code) {
+-#ifdef OPENSSL_NO_HEARTBEATS /* BoringSSL */
++#ifdef OPENSSL_IS_BORINGSSL /* BoringSSL */
+ conn->err_code = CO_ER_SSL_HANDSHAKE;
+ #else
+ int empty_handshake;
+@@ -5205,7 +5205,7 @@ check_error:
+ if (!errno && conn->flags & CO_FL_WAIT_L4_CONN)
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+ if (!conn->err_code) {
+-#ifdef OPENSSL_NO_HEARTBEATS /* BoringSSL */
++#ifdef OPENSSL_IS_BORINGSSL /* BoringSSL */
+ conn->err_code = CO_ER_SSL_HANDSHAKE;
+ #else
+ int empty_handshake;
--- /dev/null
+commit 9923082f94e0af83d03e030f4274d3db374b248c
+Author: Emeric Brun <ebrun@haproxy.com>
+Date: Thu Aug 16 15:11:12 2018 +0200
+
+ BUG/MEDIUM: ssl: fix missing error loading a keytype cert from a bundle.
+
+ If there was an issue loading a keytype's part of a bundle, the bundle
+ was implicitly ignored without errors.
+
+ This patch should be backported in 1.8 (and perhaps 1.7)
+
+ (cherry picked from commit eb155b6ca6c1a8aaffa30285d453909b97979f5f)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/ssl_sock.c b/src/ssl_sock.c
+index 49389f01..9f0ff1f0 100644
+--- a/src/ssl_sock.c
++++ b/src/ssl_sock.c
+@@ -3475,7 +3475,7 @@ int ssl_sock_load_cert(char *path, struct bind_conf *bind_conf, char **err)
+ }
+
+ snprintf(fp, sizeof(fp), "%s/%s", path, dp);
+- ssl_sock_load_multi_cert(fp, bind_conf, NULL, NULL, 0, err);
++ cfgerr += ssl_sock_load_multi_cert(fp, bind_conf, NULL, NULL, 0, err);
+
+ /* Successfully processed the bundle */
+ goto ignore_entry;
--- /dev/null
+commit 399714287a04d6b453ba95e4a3904a7644827d0b
+Author: Emeric Brun <ebrun@haproxy.com>
+Date: Thu Aug 16 15:14:12 2018 +0200
+
+ BUG/MEDIUM: ssl: loading dh param from certifile causes unpredictable error.
+
+ If the dh parameter is not found, the openssl's error global
+ stack was not correctly cleared causing unpredictable error
+ during the following parsing (chain cert parsing for instance).
+
+ This patch should be backported in 1.8 (and perhaps 1.7)
+
+ (cherry picked from commit e1b4ed4352619f985d7d65f5d95a830ef5775c46)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/ssl_sock.c b/src/ssl_sock.c
+index 9f0ff1f0..9be2fc4c 100644
+--- a/src/ssl_sock.c
++++ b/src/ssl_sock.c
+@@ -2597,6 +2597,8 @@ end:
+ if (in)
+ BIO_free(in);
+
++ ERR_clear_error();
++
+ return dh;
+ }
+
--- /dev/null
+commit a1110e24e5be53ba5fe9ab82372c02a60da06cf9
+Author: Emeric Brun <ebrun@haproxy.com>
+Date: Tue Jul 17 09:47:07 2018 -0400
+
+ BUG/MINOR: map: fix map_regm with backref
+
+ Due to a cascade of get_trash_chunk calls the sample is
+ corrupted when we want to read it.
+
+ The fix consist to use a temporary chunk to copy the sample
+ value and use it.
+
+ (cherry picked from commit 271022150d7961b9aa39dbfd88e0c6a4bc48c3ee)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/map.c b/src/map.c
+index a9a1e53c..da399088 100644
+--- a/src/map.c
++++ b/src/map.c
+@@ -184,10 +184,27 @@ static int sample_conv_map(const struct arg *arg_p, struct sample *smp, void *pr
+ if (pat->data) {
+ /* In the regm case, merge the sample with the input. */
+ if ((long)private == PAT_MATCH_REGM) {
++ struct chunk *tmptrash;
++
++ /* Copy the content of the sample because it could
++ be scratched by incoming get_trash_chunk */
++ tmptrash = alloc_trash_chunk();
++ if (!tmptrash)
++ return 0;
++
++ tmptrash->len = smp->data.u.str.len;
++ if (tmptrash->len > (tmptrash->size-1))
++ tmptrash->len = tmptrash->size-1;
++
++ memcpy(tmptrash->str, smp->data.u.str.str, tmptrash->len);
++ tmptrash->str[tmptrash->len] = 0;
++
+ str = get_trash_chunk();
+- str->len = exp_replace(str->str, str->size, smp->data.u.str.str,
++ str->len = exp_replace(str->str, str->size, tmptrash->str,
+ pat->data->u.str.str,
+ (regmatch_t *)smp->ctx.a[0]);
++
++ free_trash_chunk(tmptrash);
+ if (str->len == -1)
+ return 0;
+ smp->data.u.str = *str;
--- /dev/null
+commit 29a43c20faa06100ade61fc24a5ee3bddfa3021a
+Author: Lukas Tribus <lukas@ltri.eu>
+Date: Tue Aug 14 11:39:35 2018 +0200
+
+ DOC: dns: explain set server ... fqdn requires resolver
+
+ Abhishek Gupta reported on discourse that set server [...] fqdn always
+ fails. Further investigation showed that this requires the internal
+ DNS resolver to be configured. Add this requirement to the docs.
+
+ Must be backported to 1.8.
+
+ (cherry picked from commit c5dd5a500a237780eb9ab6e7069949cb19b6ff7d)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/doc/management.txt b/doc/management.txt
+index 68a17c25..46e7fd07 100644
+--- a/doc/management.txt
++++ b/doc/management.txt
+@@ -1675,7 +1675,8 @@ set server <backend>/<server> weight <weight>[%]
+ equivalent of the "set weight" command below.
+
+ set server <backend>/<server> fqdn <FQDN>
+- Change a server's FQDN to the value passed in argument.
++ Change a server's FQDN to the value passed in argument. This requires the
++ internal run-time DNS resolver to be configured and enabled for this server.
+
+ set severity-output [ none | number | string ]
+ Change the severity output format of the stats socket connected to for the
--- /dev/null
+commit 54aecf18aeabe09bccf8db5e34b99bc36d468088
+Author: Bertrand Jacquin <bertrand@jacquin.bzh>
+Date: Tue Aug 14 00:56:13 2018 +0100
+
+ DOC: ssl: Use consistent naming for TLS protocols
+
+ In most cases, "TLSv1.x" naming is used across code and documentation, lazy
+ people tend to grep too much and may not find what they are looking for.
+
+ Fixing people is hard.
+
+ (cherry picked from commit a25282bb399bfad8ed04b494b567fe97f0a58d65)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/doc/configuration.txt b/doc/configuration.txt
+index 1973bbf2..43e28785 100644
+--- a/doc/configuration.txt
++++ b/doc/configuration.txt
+@@ -10447,7 +10447,7 @@ accept-proxy
+ setting of which client is allowed to use the protocol.
+
+ allow-0rtt
+- Allow receiving early data when using TLS 1.3. This is disabled by default,
++ Allow receiving early data when using TLSv1.3. This is disabled by default,
+ due to security considerations.
+
+ alpn <protocols>
+diff --git a/src/ssl_sock.c b/src/ssl_sock.c
+index 9be2fc4c..0b49e0b4 100644
+--- a/src/ssl_sock.c
++++ b/src/ssl_sock.c
+@@ -1986,7 +1986,7 @@ static void ctx_set_TLSv12_func(SSL_CTX *ctx, set_context_func c) {
+ : SSL_CTX_set_ssl_version(ctx, TLSv1_2_client_method());
+ #endif
+ }
+-/* TLS 1.2 is the last supported version in this context. */
++/* TLSv1.2 is the last supported version in this context. */
+ static void ctx_set_TLSv13_func(SSL_CTX *ctx, set_context_func c) {}
+ /* Unusable in this context. */
+ static void ssl_set_SSLv3_func(SSL *ssl, set_context_func c) {}
+@@ -2187,7 +2187,7 @@ static int ssl_sock_switchctx_cbk(SSL *ssl, int *al, void *arg)
+ break;
+ }
+ } else {
+- /* without TLSEXT_TYPE_signature_algorithms extension (< TLS 1.2) */
++ /* without TLSEXT_TYPE_signature_algorithms extension (< TLSv1.2) */
+ has_rsa = 1;
+ }
+ if (has_ecdsa_sig) { /* in very rare case: has ecdsa sign but not a ECDSA cipher */
--- /dev/null
+commit 947a3f71ad9733dde6645edb91b6becb3ee51e7c
+Author: Cyril Bonté <cyril.bonte@free.fr>
+Date: Fri Aug 17 23:51:02 2018 +0200
+
+ BUG/MEDIUM: lua: socket timeouts are not applied
+
+ Sachin Shetty reported that socket timeouts set in LUA code have no effect.
+ Indeed, connect timeout is never modified and is always set to its default,
+ set to 5 seconds. Currently, this patch will apply the specified timeout
+ value to the connect timeout.
+ For the read and write timeouts, the issue is that the timeout is updated but
+ the expiration dates were not updated.
+
+ This patch should be backported up to the 1.6 branch.
+
+ (cherry picked from commit 7bb634549794298fc701d33efd93c7289dcf9cb7)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/hlua.c b/src/hlua.c
+index 4e50fa64..daf775fc 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -2566,10 +2566,19 @@ __LJMP static int hlua_socket_settimeout(struct lua_State *L)
+ si = appctx->owner;
+ s = si_strm(si);
+
++ s->sess->fe->timeout.connect = tmout;
+ s->req.rto = tmout;
+ s->req.wto = tmout;
+ s->res.rto = tmout;
+ s->res.wto = tmout;
++ s->req.rex = tick_add_ifset(now_ms, tmout);
++ s->req.wex = tick_add_ifset(now_ms, tmout);
++ s->res.rex = tick_add_ifset(now_ms, tmout);
++ s->res.wex = tick_add_ifset(now_ms, tmout);
++
++ s->task->expire = tick_add_ifset(now_ms, tmout);
++ task_queue(s->task);
++
+ xref_unlock(&socket->xref, peer);
+
+ lua_pushinteger(L, 1);
--- /dev/null
+commit 3c42f13badd149c9c3152d7b2e653bde5da7c17a
+Author: Willy Tarreau <w@1wt.eu>
+Date: Tue Aug 21 14:50:44 2018 +0200
+
+ BUG/MEDIUM: cli/threads: protect all "proxy" commands against concurrent updates
+
+ The proxy-related commands like "{enable|disable|shutdown} frontend",
+ "{enable|disable} dynamic-cookie", "set dynamic-cookie-key" were not
+ protected against concurrent accesses making their use dangerous with
+ threads.
+
+ This patch must be backported to 1.8.
+
+ (cherry picked from commit a275a3710eaa365150fe89e2e7a8fbdce87bb30e)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/proxy.c b/src/proxy.c
+index 4437b703..8926ba8b 100644
+--- a/src/proxy.c
++++ b/src/proxy.c
+@@ -1560,7 +1560,10 @@ static int cli_io_handler_show_backend(struct appctx *appctx)
+ return 1;
+ }
+
+-/* Parses the "enable dynamic-cookies backend" directive, it always returns 1 */
++/* Parses the "enable dynamic-cookies backend" directive, it always returns 1.
++ *
++ * Grabs the proxy lock and each server's lock.
++ */
+ static int cli_parse_enable_dyncookie_backend(char **args, struct appctx *appctx, void *private)
+ {
+ struct proxy *px;
+@@ -1573,15 +1576,25 @@ static int cli_parse_enable_dyncookie_backend(char **args, struct appctx *appctx
+ if (!px)
+ return 1;
+
++ HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
++
+ px->ck_opts |= PR_CK_DYNAMIC;
+
+- for (s = px->srv; s != NULL; s = s->next)
++ for (s = px->srv; s != NULL; s = s->next) {
++ HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+ srv_set_dyncookie(s);
++ HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
++ }
++
++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+
+ return 1;
+ }
+
+-/* Parses the "disable dynamic-cookies backend" directive, it always returns 1 */
++/* Parses the "disable dynamic-cookies backend" directive, it always returns 1.
++ *
++ * Grabs the proxy lock and each server's lock.
++ */
+ static int cli_parse_disable_dyncookie_backend(char **args, struct appctx *appctx, void *private)
+ {
+ struct proxy *px;
+@@ -1594,19 +1607,28 @@ static int cli_parse_disable_dyncookie_backend(char **args, struct appctx *appct
+ if (!px)
+ return 1;
+
++ HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
++
+ px->ck_opts &= ~PR_CK_DYNAMIC;
+
+ for (s = px->srv; s != NULL; s = s->next) {
++ HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+ if (!(s->flags & SRV_F_COOKIESET)) {
+ free(s->cookie);
+ s->cookie = NULL;
+ }
++ HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+ }
+
++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
++
+ return 1;
+ }
+
+-/* Parses the "set dynamic-cookie-key backend" directive, it always returns 1 */
++/* Parses the "set dynamic-cookie-key backend" directive, it always returns 1.
++ *
++ * Grabs the proxy lock and each server's lock.
++ */
+ static int cli_parse_set_dyncookie_key_backend(char **args, struct appctx *appctx, void *private)
+ {
+ struct proxy *px;
+@@ -1634,16 +1656,27 @@ static int cli_parse_set_dyncookie_key_backend(char **args, struct appctx *appct
+ appctx->st0 = CLI_ST_PRINT;
+ return 1;
+ }
++
++ HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
++
+ free(px->dyncookie_key);
+ px->dyncookie_key = newkey;
+
+- for (s = px->srv; s != NULL; s = s->next)
++ for (s = px->srv; s != NULL; s = s->next) {
++ HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+ srv_set_dyncookie(s);
++ HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
++ }
++
++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+
+ return 1;
+ }
+
+-/* Parses the "set maxconn frontend" directive, it always returns 1 */
++/* Parses the "set maxconn frontend" directive, it always returns 1.
++ *
++ * Grabs the proxy lock.
++ */
+ static int cli_parse_set_maxconn_frontend(char **args, struct appctx *appctx, void *private)
+ {
+ struct proxy *px;
+@@ -1675,6 +1708,8 @@ static int cli_parse_set_maxconn_frontend(char **args, struct appctx *appctx, vo
+ /* OK, the value is fine, so we assign it to the proxy and to all of
+ * its listeners. The blocked ones will be dequeued.
+ */
++ HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
++
+ px->maxconn = v;
+ list_for_each_entry(l, &px->conf.listeners, by_fe) {
+ l->maxconn = v;
+@@ -1685,10 +1720,15 @@ static int cli_parse_set_maxconn_frontend(char **args, struct appctx *appctx, vo
+ if (px->maxconn > px->feconn && !LIST_ISEMPTY(&px->listener_queue))
+ dequeue_all_listeners(&px->listener_queue);
+
++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
++
+ return 1;
+ }
+
+-/* Parses the "shutdown frontend" directive, it always returns 1 */
++/* Parses the "shutdown frontend" directive, it always returns 1.
++ *
++ * Grabs the proxy lock.
++ */
+ static int cli_parse_shutdown_frontend(char **args, struct appctx *appctx, void *private)
+ {
+ struct proxy *px;
+@@ -1711,14 +1751,22 @@ static int cli_parse_shutdown_frontend(char **args, struct appctx *appctx, void
+ px->id, px->fe_counters.cum_conn, px->be_counters.cum_conn);
+ send_log(px, LOG_WARNING, "Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
+ px->id, px->fe_counters.cum_conn, px->be_counters.cum_conn);
++
++ HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+ stop_proxy(px);
++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
++
+ return 1;
+ }
+
+-/* Parses the "disable frontend" directive, it always returns 1 */
++/* Parses the "disable frontend" directive, it always returns 1.
++ *
++ * Grabs the proxy lock.
++ */
+ static int cli_parse_disable_frontend(char **args, struct appctx *appctx, void *private)
+ {
+ struct proxy *px;
++ int ret;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+@@ -1741,7 +1789,11 @@ static int cli_parse_disable_frontend(char **args, struct appctx *appctx, void *
+ return 1;
+ }
+
+- if (!pause_proxy(px)) {
++ HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
++ ret = pause_proxy(px);
++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
++
++ if (!ret) {
+ appctx->ctx.cli.severity = LOG_ERR;
+ appctx->ctx.cli.msg = "Failed to pause frontend, check logs for precise cause.\n";
+ appctx->st0 = CLI_ST_PRINT;
+@@ -1750,10 +1802,14 @@ static int cli_parse_disable_frontend(char **args, struct appctx *appctx, void *
+ return 1;
+ }
+
+-/* Parses the "enable frontend" directive, it always returns 1 */
++/* Parses the "enable frontend" directive, it always returns 1.
++ *
++ * Grabs the proxy lock.
++ */
+ static int cli_parse_enable_frontend(char **args, struct appctx *appctx, void *private)
+ {
+ struct proxy *px;
++ int ret;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+@@ -1776,7 +1832,11 @@ static int cli_parse_enable_frontend(char **args, struct appctx *appctx, void *p
+ return 1;
+ }
+
+- if (!resume_proxy(px)) {
++ HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
++ ret = resume_proxy(px);
++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
++
++ if (!ret) {
+ appctx->ctx.cli.severity = LOG_ERR;
+ appctx->ctx.cli.msg = "Failed to resume frontend, check logs for precise cause (port conflict?).\n";
+ appctx->st0 = CLI_ST_PRINT;
--- /dev/null
+commit 0dbaa252df906cc9c1d0dc7a075c16e039ab1c5b
+Author: Willy Tarreau <w@1wt.eu>
+Date: Tue Aug 21 15:35:31 2018 +0200
+
+ BUG/MEDIUM: cli/threads: protect some server commands against concurrent operations
+
+ The server-specific CLI commands "set weight", "set maxconn",
+ "disable agent", "enable agent", "disable health", "enable health",
+ "disable server" and "enable server" were not protected against
+ concurrent accesses. Now they take the server lock around the
+ sensitive part.
+
+ This patch must be backported to 1.8.
+
+ (cherry picked from commit 3bcc2699ba08dd3971ae7a56631994b2524d2acb)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/server.c b/src/server.c
+index 36a05e27..98dae535 100644
+--- a/src/server.c
++++ b/src/server.c
+@@ -4299,6 +4299,10 @@ static int cli_parse_get_weight(char **args, struct appctx *appctx, void *privat
+ return 1;
+ }
+
++/* Parse a "set weight" command.
++ *
++ * Grabs the server lock.
++ */
+ static int cli_parse_set_weight(char **args, struct appctx *appctx, void *private)
+ {
+ struct server *sv;
+@@ -4311,16 +4315,24 @@ static int cli_parse_set_weight(char **args, struct appctx *appctx, void *privat
+ if (!sv)
+ return 1;
+
++ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
++
+ warning = server_parse_weight_change_request(sv, args[3]);
+ if (warning) {
+ appctx->ctx.cli.severity = LOG_ERR;
+ appctx->ctx.cli.msg = warning;
+ appctx->st0 = CLI_ST_PRINT;
+ }
++
++ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
++
+ return 1;
+ }
+
+-/* parse a "set maxconn server" command. It always returns 1. */
++/* parse a "set maxconn server" command. It always returns 1.
++ *
++ * Grabs the server lock.
++ */
+ static int cli_parse_set_maxconn_server(char **args, struct appctx *appctx, void *private)
+ {
+ struct server *sv;
+@@ -4333,16 +4345,24 @@ static int cli_parse_set_maxconn_server(char **args, struct appctx *appctx, void
+ if (!sv)
+ return 1;
+
++ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
++
+ warning = server_parse_maxconn_change_request(sv, args[4]);
+ if (warning) {
+ appctx->ctx.cli.severity = LOG_ERR;
+ appctx->ctx.cli.msg = warning;
+ appctx->st0 = CLI_ST_PRINT;
+ }
++
++ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
++
+ return 1;
+ }
+
+-/* parse a "disable agent" command. It always returns 1. */
++/* parse a "disable agent" command. It always returns 1.
++ *
++ * Grabs the server lock.
++ */
+ static int cli_parse_disable_agent(char **args, struct appctx *appctx, void *private)
+ {
+ struct server *sv;
+@@ -4354,11 +4374,16 @@ static int cli_parse_disable_agent(char **args, struct appctx *appctx, void *pri
+ if (!sv)
+ return 1;
+
++ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ sv->agent.state &= ~CHK_ST_ENABLED;
++ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+ }
+
+-/* parse a "disable health" command. It always returns 1. */
++/* parse a "disable health" command. It always returns 1.
++ *
++ * Grabs the server lock.
++ */
+ static int cli_parse_disable_health(char **args, struct appctx *appctx, void *private)
+ {
+ struct server *sv;
+@@ -4370,11 +4395,16 @@ static int cli_parse_disable_health(char **args, struct appctx *appctx, void *pr
+ if (!sv)
+ return 1;
+
++ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ sv->check.state &= ~CHK_ST_ENABLED;
++ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+ }
+
+-/* parse a "disable server" command. It always returns 1. */
++/* parse a "disable server" command. It always returns 1.
++ *
++ * Grabs the server lock.
++ */
+ static int cli_parse_disable_server(char **args, struct appctx *appctx, void *private)
+ {
+ struct server *sv;
+@@ -4386,11 +4416,16 @@ static int cli_parse_disable_server(char **args, struct appctx *appctx, void *pr
+ if (!sv)
+ return 1;
+
++ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ srv_adm_set_maint(sv);
++ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+ }
+
+-/* parse a "enable agent" command. It always returns 1. */
++/* parse a "enable agent" command. It always returns 1.
++ *
++ * Grabs the server lock.
++ */
+ static int cli_parse_enable_agent(char **args, struct appctx *appctx, void *private)
+ {
+ struct server *sv;
+@@ -4409,11 +4444,16 @@ static int cli_parse_enable_agent(char **args, struct appctx *appctx, void *priv
+ return 1;
+ }
+
++ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ sv->agent.state |= CHK_ST_ENABLED;
++ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+ }
+
+-/* parse a "enable health" command. It always returns 1. */
++/* parse a "enable health" command. It always returns 1.
++ *
++ * Grabs the server lock.
++ */
+ static int cli_parse_enable_health(char **args, struct appctx *appctx, void *private)
+ {
+ struct server *sv;
+@@ -4425,11 +4465,16 @@ static int cli_parse_enable_health(char **args, struct appctx *appctx, void *pri
+ if (!sv)
+ return 1;
+
++ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ sv->check.state |= CHK_ST_ENABLED;
++ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+ }
+
+-/* parse a "enable server" command. It always returns 1. */
++/* parse a "enable server" command. It always returns 1.
++ *
++ * Grabs the server lock.
++ */
+ static int cli_parse_enable_server(char **args, struct appctx *appctx, void *private)
+ {
+ struct server *sv;
+@@ -4441,11 +4486,13 @@ static int cli_parse_enable_server(char **args, struct appctx *appctx, void *pri
+ if (!sv)
+ return 1;
+
++ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ srv_adm_set_ready(sv);
+ if (!(sv->flags & SRV_F_COOKIESET)
+ && (sv->proxy->ck_opts & PR_CK_DYNAMIC) &&
+ sv->cookie)
+ srv_check_for_dup_dyncookie(sv);
++ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+ }
+
--- /dev/null
+commit d13cb1516cb5ae4cb8322ed630e1d4e1f584fd77
+Author: Jens Bissinger <whiterabbit.init@googlemail.com>
+Date: Thu Aug 23 14:11:27 2018 +0200
+
+ DOC: Fix spelling error in configuration doc
+
+ Fix spelling error in logging section of configuration doc.
+
+ (cherry picked from commit 15c64ff4fb9f1f64b31306ac53b38fc4d5fb1538)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/doc/configuration.txt b/doc/configuration.txt
+index 43e28785..0dd212ad 100644
+--- a/doc/configuration.txt
++++ b/doc/configuration.txt
+@@ -16089,7 +16089,7 @@ Please refer to the table below for currently defined variables :
+ | | %t | date_time (with millisecond resolution) | date |
+ | H | %tr | date_time of HTTP request | date |
+ | H | %trg | gmt_date_time of start of HTTP request | date |
+- | H | %trl | locla_date_time of start of HTTP request | date |
++ | H | %trl | local_date_time of start of HTTP request | date |
+ | | %ts | termination_state | string |
+ | H | %tsc | termination_state with cookie status | string |
+ +---+------+-----------------------------------------------+-------------+
--- /dev/null
+commit f87ea7d2fdcfa3ccd5d605b3ce96642d28f20f6b
+Author: Willy Tarreau <w@1wt.eu>
+Date: Fri Aug 24 14:31:53 2018 +0200
+
+ BUG/MEDIUM: unix: provide a ->drain() function
+
+ Right now conn_sock_drain() calls the protocol's ->drain() function if
+ it exists, otherwise it simply tries to disable polling for receiving
+ on the connection. This doesn't work well anymore since we've implemented
+ the muxes in 1.8, and it has a side effect with keep-alive backend
+ connections established over unix sockets. What happens is that if
+ during the idle time after a request, a connection reports some data,
+ si_idle_conn_null_cb() is called, which will call conn_sock_drain().
+ This one sees there's no drain() on unix sockets and will simply disable
+ polling for data on the connection. But it doesn't do anything on the
+ conn_stream. Thus while leaving the conn_fd_handler, the mux's polling
+ is updated and recomputed based on the conn_stream's polling state,
+ which is still enabled, and nothing changes, so we see the process
+ use 100% CPU in this case because the FD remains active in the cache.
+
+ There are several issues that need to be addressed here. The first and
+ most important is that we cannot expect some protocols to simply stop
+ reading data when asked to drain pending data. So this patch make the
+ unix sockets rely on tcp_drain() since the functions are the same. This
+ solution is appropriate for backporting, but a better one is desired for
+ the long term. The second issue is that si_idle_conn_null_cb() shouldn't
+ drain the connection but the conn_stream.
+
+ At the moment we don't have any way to drain a conn_stream, though a flag
+ on rcv_buf() will do it well. Until we support muxes on the server side
+ it is not a problem so this part can be addressed later.
+
+ This fix must be backported to 1.8.
+
+ (cherry picked from commit fe5d2ac65fd58a8320e8dc725219c1bce5839592)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/proto_uxst.c b/src/proto_uxst.c
+index f2374be6..0b3a57b8 100644
+--- a/src/proto_uxst.c
++++ b/src/proto_uxst.c
+@@ -42,6 +42,7 @@
+ #include <proto/listener.h>
+ #include <proto/log.h>
+ #include <proto/protocol.h>
++#include <proto/proto_tcp.h>
+ #include <proto/task.h>
+
+ static int uxst_bind_listener(struct listener *listener, char *errmsg, int errlen);
+@@ -71,6 +72,7 @@ static struct protocol proto_unix = {
+ .disable_all = disable_all_listeners,
+ .get_src = uxst_get_src,
+ .get_dst = uxst_get_dst,
++ .drain = tcp_drain,
+ .pause = uxst_pause_listener,
+ .add = uxst_add_listener,
+ .listeners = LIST_HEAD_INIT(proto_unix.listeners),
--- /dev/null
+commit 5b58c92dc9357a87aa3fe94c8121f683feb9c80e
+Author: Frédéric Lécaille <flecaille@haproxy.com>
+Date: Wed Jul 18 14:25:26 2018 +0200
+
+ BUG/MINOR: lua: Bad HTTP client request duration.
+
+ HTTP LUA applet callback should not update the date on which the HTTP client requests
+ arrive. This was done just after the LUA applet has completed its job.
+
+ This patch simply removes the affected statement. The same fix has been applied
+ to TCP LUA applet callback.
+
+ To reproduce this issue, as reported by Patrick Hemmer, implement an HTTP LUA applet
+ which sleeps a bit before replying:
+
+ core.register_service("foo", "http", function(applet)
+ core.msleep(100)
+ applet:set_status(200)
+ applet:start_response()
+ end)
+
+ This had as a consequence to log %TR field with approximately the same value as
+ the LUA sleep time.
+
+ Thank you to Patrick Hemmer for having reported this issue.
+
+ Must be backported to 1.8, 1.7 and 1.6.
+
+ (cherry picked from commit 83ed5d58d2c767d03ce97aef484863a6e1c37a94)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/hlua.c b/src/hlua.c
+index daf775fc..8147ed15 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -6374,9 +6374,6 @@ static void hlua_applet_tcp_fct(struct appctx *ctx)
+ case HLUA_E_OK:
+ ctx->ctx.hlua_apptcp.flags |= APPLET_DONE;
+
+- /* log time */
+- strm->logs.tv_request = now;
+-
+ /* eat the whole request */
+ co_skip(si_oc(si), si_ob(si)->o);
+ res->flags |= CF_READ_NULL;
+@@ -6675,9 +6672,8 @@ static void hlua_applet_http_fct(struct appctx *ctx)
+
+ /* close the connection. */
+
+- /* status / log */
++ /* status */
+ strm->txn->status = ctx->ctx.hlua_apphttp.status;
+- strm->logs.tv_request = now;
+
+ /* eat the whole request */
+ co_skip(si_oc(si), si_ob(si)->o);
--- /dev/null
+commit d9a130e1962c2a5352f33088c563f4248a102c48
+Author: Willy Tarreau <w@1wt.eu>
+Date: Fri Aug 24 15:48:59 2018 +0200
+
+ BUG/MEDIUM: mux_pt: dereference the connection with care in mux_pt_wake()
+
+ mux_pt_wake() calls data->wake() which can return -1 indicating that the
+ connection was just destroyed. We need to check for this condition and
+ immediately exit in this case otherwise we dereference a just freed
+ connection. Note that this mainly happens on idle connections between
+ two HTTP requests. It can have random implications between requests as
+ it may lead a wrong connection's polling to be re-enabled or disabled
+ for example, especially with threads.
+
+ This patch must be backported to 1.8.
+
+ (cherry picked from commit ad7f0ad1c3c9c541a4c315b24d4500405d1383ee)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/mux_pt.c b/src/mux_pt.c
+index a68b9621..c43e30f2 100644
+--- a/src/mux_pt.c
++++ b/src/mux_pt.c
+@@ -51,6 +51,9 @@ static int mux_pt_wake(struct connection *conn)
+
+ ret = cs->data_cb->wake ? cs->data_cb->wake(cs) : 0;
+
++ if (ret < 0)
++ return ret;
++
+ /* If we had early data, and we're done with the handshake
+ * then whe know the data are safe, and we can remove the flag.
+ */