PKG_NAME:=haproxy
PKG_VERSION:=1.4.22
-PKG_RELEASE:=24
+PKG_RELEASE:=35
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=http://haproxy.1wt.eu/download/1.4/src
endef
define Build/Compile
- $(MAKE) TARGET=linux26 -C $(PKG_BUILD_DIR) \
+ $(MAKE) TARGET=linux2628 -C $(PKG_BUILD_DIR) \
DESTDIR="$(PKG_INSTALL_DIR)" \
CC="$(TARGET_CC)" \
CFLAGS="$(TARGET_CFLAGS)" \
--- /dev/null
+From e35cc9549c74f696823a13d24df39be06192bf0b Mon Sep 17 00:00:00 2001
+From: Michael Scherer <misc@zarb.org>
+Date: Sat, 12 Jan 2013 18:35:19 +0100
+Subject: BUG/MEDIUM: remove supplementary groups when changing gid
+
+Without it, haproxy will retain the group membership of root, which may
+give more access than intended to the process. For example, haproxy would
+still be in the wheel group on Fedora 18, as seen with:
+
+ # haproxy -f /etc/haproxy/haproxy.cfg
+
+ # ps a -o pid,user,group,command | grep hapr
+ 3545 haproxy haproxy haproxy -f /etc/haproxy/haproxy.cfg
+ 4356 root root grep --color=auto hapr
+ # grep Group /proc/3545/status
+ Groups: 0 1 2 3 4 6 10
+ # getent group wheel
+ wheel:x:10:root,misc
+
+[WT: The issue has been investigated by an independent security research
+ team and was found not to permit any security exploitation by itself.
+ Additionally, dropping supplementary groups is not permitted for
+ unprivileged users, even though this mode of deployment is quite common.
+ Thus a warning is emitted in this case to inform the user. The fix could
+ be backported to all supported versions as the issue has always been there. ]
+
+(cherry picked from commit ab012dd394d596f022c0d16f3584d5f61ffcf10e)
+---
+ doc/configuration.txt | 2 ++
+ src/haproxy.c | 15 +++++++++++----
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/doc/configuration.txt b/doc/configuration.txt
+index e5d9af5..20b89c2 100644
+--- a/doc/configuration.txt
++++ b/doc/configuration.txt
+@@ -486,6 +486,8 @@ gid <number>
+ Changes the process' group ID to <number>. It is recommended that the group
+ ID is dedicated to HAProxy or to a small set of similar daemons. HAProxy must
+ be started with a user belonging to this group, or with superuser privileges.
++ Note that if haproxy is started from a user having supplementary groups, it
++ will only be able to drop these groups if started with superuser privileges.
+ See also "group" and "uid".
+
+ group <group name>
+diff --git a/src/haproxy.c b/src/haproxy.c
+index ec481aa..c302143 100644
+--- a/src/haproxy.c
++++ b/src/haproxy.c
+@@ -44,6 +44,7 @@
+ #include <sys/resource.h>
+ #include <time.h>
+ #include <syslog.h>
++#include <grp.h>
+
+ #ifdef DEBUG_FULL
+ #include <assert.h>
+@@ -1186,10 +1187,16 @@ int main(int argc, char **argv)
+ */
+
+ /* setgid / setuid */
+- if (global.gid && setgid(global.gid) == -1) {
+- Alert("[%s.main()] Cannot set gid %d.\n", argv[0], global.gid);
+- protocol_unbind_all();
+- exit(1);
++ if (global.gid) {
++ if (getgroups(0, NULL) > 0 && setgroups(0, NULL) == -1)
++ Warning("[%s.main()] Failed to drop supplementary groups. Using 'gid'/'group'"
++ " without 'uid'/'user' is generally useless.\n", argv[0]);
++
++ if (setgid(global.gid) == -1) {
++ Alert("[%s.main()] Cannot set gid %d.\n", argv[0], global.gid);
++ protocol_unbind_all();
++ exit(1);
++ }
+ }
+
+ if (global.uid && setuid(global.uid) == -1) {
+--
+1.7.1
+
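For illustration only (not part of the patch above): a minimal sketch of the
privilege-drop order the fix relies on, where supplementary groups are
cleared with setgroups() before setgid()/setuid(), and only a privileged
process may do so. The function name and the 99/99 uid/gid values are
placeholders.

    #include <grp.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Usual privilege-drop order: clear supplementary groups first (only
     * root may do this), then change the gid, then the uid.  The 99/99
     * values are placeholders, not haproxy defaults.
     */
    static void drop_privileges(uid_t uid, gid_t gid)
    {
        if (setgroups(0, NULL) == -1)   /* drop the inherited extra groups */
            perror("setgroups");        /* warn only, as the patch does */

        if (setgid(gid) == -1) {
            perror("setgid");
            exit(1);
        }
        if (setuid(uid) == -1) {
            perror("setuid");
            exit(1);
        }
    }

    int main(void)
    {
        drop_privileges(99, 99);
        return 0;
    }
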
--- /dev/null
+From a587005ff413866d7346e7448438d4c5be5a1cd7 Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Fri, 18 Jan 2013 15:22:41 +0100
+Subject: BUG/MINOR: epoll: use a fixed maxevents argument in epoll_wait()
+
+epoll_wait() takes the maximum number of events to return, not the number
+of fds to consider. We must not pass it the number of the smallest fd,
+as that leads to the value zero being used, which is invalid for
+epoll_wait(). The effect may sometimes be observed with peers sections
+trying to connect and causing 2-second CPU loops upon a soft reload,
+because epoll_wait() immediately returns -1 EINVAL instead of waiting
+for the timeout to happen.
+
+This fix should be backported to 1.4 too (into ev_epoll and ev_sepoll).
+
+(cherry picked from commit cf181c9d404815f890da7cd2243a5528edd7b4f9)
+---
+ src/ev_epoll.c | 3 +--
+ src/ev_sepoll.c | 1 -
+ 2 files changed, 1 insertions(+), 3 deletions(-)
+
+diff --git a/src/ev_epoll.c b/src/ev_epoll.c
+index 0b22da6..1d213d9 100644
+--- a/src/ev_epoll.c
++++ b/src/ev_epoll.c
+@@ -249,8 +249,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+ wait_time = MAX_DELAY_MS;
+ }
+
+- fd = MIN(maxfd, global.tune.maxpollevents);
+- status = epoll_wait(epoll_fd, epoll_events, fd, wait_time);
++ status = epoll_wait(epoll_fd, epoll_events, global.tune.maxpollevents, wait_time);
+ tv_update_date(wait_time, status);
+
+ for (count = 0; count < status; count++) {
+diff --git a/src/ev_sepoll.c b/src/ev_sepoll.c
+index 248f1f4..a3ef118 100644
+--- a/src/ev_sepoll.c
++++ b/src/ev_sepoll.c
+@@ -481,7 +481,6 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
+ */
+ fd = MIN(absmaxevents, spec_processed);
+ fd = MAX(global.tune.maxpollevents, fd);
+- fd = MIN(maxfd, fd);
+ /* we want to detect if an accept() will create new speculative FDs here */
+ fd_created = 0;
+ spec_processed = 0;
+--
+1.7.1
+
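As a side note (not part of the patch above), an illustrative sketch of how
the third argument of epoll_wait() is meant to be used: it is the capacity
of the events array, never an fd number, and must be greater than zero. The
constant and function names below are invented for the example.

    #include <stdio.h>
    #include <sys/epoll.h>

    #define MAX_EVENTS 64   /* capacity of the array, like tune.maxpollevents */

    /* The 3rd argument of epoll_wait() is the size of <events>; passing 0
     * (e.g. a smallest-fd value) makes the call fail with EINVAL at once.
     */
    int poll_once(int epoll_fd, int timeout_ms)
    {
        struct epoll_event events[MAX_EVENTS];
        int i, n;

        n = epoll_wait(epoll_fd, events, MAX_EVENTS, timeout_ms);
        if (n < 0) {
            perror("epoll_wait");
            return -1;
        }
        for (i = 0; i < n; i++) {
            /* process events[i].data.fd here */
        }
        return n;
    }
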
--- /dev/null
+From 48ffce4546ce99d51ea8c3dfb575c307e9f96ea5 Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Thu, 24 Jan 2013 00:25:39 +0100
+Subject: BUG/MINOR: config: fix improper check for failed memory alloc in ACL parser
+
+The wrong variable is checked after a calloc() so a memory shortage would
+result in a segfault while loading the config instead of a clean error.
+
+This fix may be backported to 1.4 and 1.3 which are both affected.
+
+Reported-by: Dinko Korunic <dkorunic@reflected.net>
+(cherry picked from commit f678b7f32253fa7b279f907dbda563e985c6478c)
+---
+ src/acl.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/src/acl.c b/src/acl.c
+index 35b6eb8..793e5ca 100644
+--- a/src/acl.c
++++ b/src/acl.c
+@@ -1230,7 +1230,7 @@ struct acl_cond *parse_acl_cond(const char **args, struct list *known_acl, int p
+
+ if (!cur_suite) {
+ cur_suite = (struct acl_term_suite *)calloc(1, sizeof(*cur_suite));
+- if (cur_term == NULL)
++ if (cur_suite == NULL)
+ goto out_free_term;
+ LIST_INIT(&cur_suite->terms);
+ LIST_ADDQ(&cond->suites, &cur_suite->list);
+--
+1.7.1
+
--- /dev/null
+From 9e98076edc9d3f25763473480e4aac6223bfd7d0 Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Thu, 24 Jan 2013 00:37:39 +0100
+Subject: BUG/MEDIUM: checks: ensure the health_status is always within bounds
+
+health_adjust() checks the status argument against incorrect bounds.
+With current code, the argument is always a constant from the valid
+enum so there is no impact and the check is basically a NOP. However
+users running local patches (eg: new checks) might want to recheck
+their code.
+
+This fix should be backported to 1.4 which introduced the issue.
+
+Reported-by: Dinko Korunic <dkorunic@reflected.net>
+(cherry picked from commit bb95666bac94b6235eda431aba788644f7de7a3f)
+---
+ src/checks.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/src/checks.c b/src/checks.c
+index 201900a..13b5c64 100644
+--- a/src/checks.c
++++ b/src/checks.c
+@@ -584,7 +584,7 @@ void health_adjust(struct server *s, short status) {
+ if (s->observe >= HANA_OBS_SIZE)
+ return;
+
+- if (status >= HCHK_STATUS_SIZE || !analyze_statuses[status].desc)
++ if (status >= HANA_STATUS_SIZE || !analyze_statuses[status].desc)
+ return;
+
+ switch (analyze_statuses[status].lr[s->observe - 1]) {
+--
+1.7.1
+
--- /dev/null
+From 062501e5d3bea2989557fa84325fc894784da16b Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Thu, 24 Jan 2013 01:25:25 +0100
+Subject: CLEANUP: http: remove a useless null check
+
+srv cannot be null in perform_http_redirect(), as it's taken
+from the stream interface's target which is always valid for a
+server-based redirect, and it was already dereferenced above, so in
+practice, gcc already removes the test anyway.
+
+Reported-by: Dinko Korunic <dkorunic@reflected.net>
+(cherry picked from commit 4521ba689c506ce66e4843c887fbc555d2ea2006)
+---
+ src/proto_http.c | 3 +--
+ 1 files changed, 1 insertions(+), 2 deletions(-)
+
+diff --git a/src/proto_http.c b/src/proto_http.c
+index ffa2224..06b3743 100644
+--- a/src/proto_http.c
++++ b/src/proto_http.c
+@@ -798,8 +798,7 @@ void perform_http_redirect(struct session *s, struct stream_interface *si)
+ http_server_error(s, si, SN_ERR_PRXCOND, SN_FINST_C, 302, &rdr);
+
+ /* FIXME: we should increase a counter of redirects per server and per backend. */
+- if (s->srv)
+- srv_inc_sess_ctr(s->srv);
++ srv_inc_sess_ctr(s->srv);
+ }
+
+ /* Return the error message corresponding to si->err_type. It is assumed
+--
+1.7.1
+
--- /dev/null
+From cf196abaa639f2ad940327145e59458b2cb7d9fe Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Thu, 24 Jan 2013 02:06:05 +0100
+Subject: BUG/MEDIUM: signal: signal handler does not properly check for signal bounds
+
+sig is checked for < 0 or > MAX_SIGNAL, but the signal array is
+MAX_SIGNAL in size. At the moment, MAX_SIGNAL is 256. If a system supports
+more than MAX_SIGNAL signals, then sending signal MAX_SIGNAL to the process
+will corrupt one integer in its memory and might also crash the process.
+
+This bug is also present in 1.4 and 1.3, and the fix must be backported.
+
+Reported-by: Dinko Korunic <dkorunic@reflected.net>
+(cherry picked from commit 1a53b5ef583988ca0405007f3ef47d2114da9546)
+---
+ src/signal.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/signal.c b/src/signal.c
+index 9825c0d..a3c6cd4 100644
+--- a/src/signal.c
++++ b/src/signal.c
+@@ -38,7 +38,7 @@ void signal_init()
+
+ void signal_handler(int sig)
+ {
+- if (sig < 0 || sig > MAX_SIGNAL || !signal_state[sig].handler) {
++ if (sig < 0 || sig >= MAX_SIGNAL || !signal_state[sig].handler) {
+ /* unhandled signal */
+ qfprintf(stderr, "Received unhandled signal %d. Signal has been disabled.\n", sig);
+ signal(sig, SIG_IGN);
+@@ -64,7 +64,7 @@ void signal_handler(int sig)
+ */
+ void signal_register(int sig, void (*handler)(int))
+ {
+- if (sig < 0 || sig > MAX_SIGNAL) {
++ if (sig < 0 || sig >= MAX_SIGNAL) {
+ qfprintf(stderr, "Failed to register signal %d : out of range [0..%d].\n", sig, MAX_SIGNAL);
+ return;
+ }
+--
+1.7.1
+
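To make the off-by-one explicit (an illustration, not part of the patch
above): an array declared with MAX_SIGNAL entries only has valid indexes
0 .. MAX_SIGNAL-1, so a bound check has to reject MAX_SIGNAL itself.

    #define MAX_SIGNAL 256

    static int signal_state[MAX_SIGNAL];    /* valid indexes: 0 .. MAX_SIGNAL-1 */

    /* signal_state[MAX_SIGNAL] would be one element past the end of the
     * array, hence the ">=" comparison in the fixed checks.
     */
    static int sig_in_range(int sig)
    {
        return sig >= 0 && sig < MAX_SIGNAL;
    }
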
--- /dev/null
+From 022ff7d0fd38505dbd87d7224ca20d1cdc729f01 Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Thu, 24 Jan 2013 02:26:43 +0100
+Subject: BUG/MEDIUM: uri_auth: missing NULL check and memory leak on memory shortage
+
+A test is obviously wrong in uri_auth(). If strdup(pass) fails while
+strdup(user) succeeds, the NULL pointer is still stored into the
+structure. If strdup(user) is the one returning NULL, the allocated
+memory is not released before returning the error.
+
+The issue was present in 1.4 so the fix should be backported.
+
+Reported-by: Dinko Korunic <dkorunic@reflected.net>
+(cherry picked from commit 0b291bdef1b9b6b539f44aa896eb1211c57a67a5)
+---
+ src/uri_auth.c | 13 ++++++++++---
+ 1 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/src/uri_auth.c b/src/uri_auth.c
+index fdbcef0..2344ac6 100644
+--- a/src/uri_auth.c
++++ b/src/uri_auth.c
+@@ -247,12 +247,19 @@ struct uri_auth *stats_add_auth(struct uri_auth **root, char *user)
+ return NULL;
+
+ newuser->user = strdup(user);
+- newuser->pass = strdup(pass);
+- newuser->flags |= AU_O_INSECURE;
++ if (!newuser->user) {
++ free(newuser);
++ return NULL;
++ }
+
+- if (!newuser->user || !newuser->user)
++ newuser->pass = strdup(pass);
++ if (!newuser->pass) {
++ free(newuser->user);
++ free(newuser);
+ return NULL;
++ }
+
++ newuser->flags |= AU_O_INSECURE;
+ newuser->next = u->userlist->users;
+ u->userlist->users = newuser;
+
+--
+1.7.1
+
--- /dev/null
+From d319dc8713c7db1eb54d0474c7c87aeaf1064b2f Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Thu, 24 Jan 2013 16:24:15 +0100
+Subject: CLEANUP: config: slowstart is never negative
+
+No need to check for a negative value in the "slowstart" argument, as
+it's an unsigned.
+
+Reported-by: Dinko Korunic <dkorunic@reflected.net>
+(cherry picked from commit 3a3bbcd6f1fd3b5629ca1759104b0e58ad637dc0)
+---
+ src/cfgparse.c | 6 ------
+ 1 files changed, 0 insertions(+), 6 deletions(-)
+
+diff --git a/src/cfgparse.c b/src/cfgparse.c
+index 90fdbff..e55d30a 100644
+--- a/src/cfgparse.c
++++ b/src/cfgparse.c
+@@ -3669,12 +3669,6 @@ stats_error_parsing:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+- if (val < 0) {
+- Alert("parsing [%s:%d]: invalid value %d for argument '%s' of server %s.\n",
+- file, linenum, val, args[cur_arg], newsrv->id);
+- err_code |= ERR_ALERT | ERR_FATAL;
+- goto out;
+- }
+ newsrv->slowstart = (val + 999) / 1000;
+ cur_arg += 2;
+ }
+--
+1.7.1
+
--- /dev/null
+From a2c277afc6f12858939ba26160ce0f91fdb160d0 Mon Sep 17 00:00:00 2001
+From: Simon Horman <horms@verge.net.au>
+Date: Wed, 13 Feb 2013 17:48:00 +0900
+Subject: BUG/MINOR: Correct logic in cut_crlf()
+
+This corrects what appear to be logic errors in cut_crlf().
+I assume that the intention of this function is to truncate a
+string at the first CR or LF. However, LF is currently ignored.
+
+Also use '\0' instead of 0 as the null character, a cosmetic change.
+
+Cc: Krzysztof Piotr Oledzki <ole@ans.pl>
+Signed-off-by: Simon Horman <horms@verge.net.au>
+
+[WT: this fix may be backported to 1.4 too]
+(cherry picked from commit 5269cfb4585ebee9babc628e2fed672c00028743)
+---
+ include/common/standard.h | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/common/standard.h b/include/common/standard.h
+index cd1a609..064d216 100644
+--- a/include/common/standard.h
++++ b/include/common/standard.h
+@@ -282,14 +282,14 @@ unsigned int inetaddr_host_lim_ret(const char *text, char *stop, const char **re
+
+ static inline char *cut_crlf(char *s) {
+
+- while (*s != '\r' || *s == '\n') {
++ while (*s != '\r' && *s != '\n') {
+ char *p = s++;
+
+ if (!*p)
+ return p;
+ }
+
+- *s++ = 0;
++ *s++ = '\0';
+
+ return s;
+ }
+--
+1.7.1
+
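A small, self-contained usage sketch of the corrected behaviour (the helper
body is copied from the fixed header; the sample request line is invented):
the string is cut at the first CR or LF.

    #include <stdio.h>

    /* Copy of the fixed helper from include/common/standard.h: stop at the
     * first '\r' or '\n', overwrite it with '\0', and return the position
     * just after the cut (or the terminating NUL when no CR/LF is present).
     */
    static inline char *cut_crlf(char *s)
    {
        while (*s != '\r' && *s != '\n') {
            char *p = s++;

            if (!*p)
                return p;
        }

        *s++ = '\0';

        return s;
    }

    int main(void)
    {
        char line[] = "GET /index.html HTTP/1.0\r\nHost: example.net";

        cut_crlf(line);
        printf("%s\n", line);   /* prints: GET /index.html HTTP/1.0 */
        return 0;
    }
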
--- /dev/null
+From a72ae88b2d68c3e772f14aed88687c676771e1c3 Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Wed, 13 Feb 2013 12:39:06 +0100
+Subject: BUILD: improve the makefile's support for libpcre
+
+Currently, when cross-compiling, it's generally necessary to force
+PCREDIR, to which the Makefile automatically appends /include and /lib.
+Unfortunately, on most 64-bit Linux distros the lib path is /lib64
+instead, which is really annoying to fix in the makefile.
+
+So now we compute PCRE_INC and PCRE_LIB from PCREDIR and use those
+instead. If one wants to force the paths individually, it is possible
+to set them instead of setting PCREDIR. The old behaviour of not
+passing anything to the compiler when PCREDIR is forced to blank
+is preserved.
+
+(cherry picked from commit 39793095d7e19031504ed75aebfbb500680baaf7)
+---
+ Makefile | 40 ++++++++++++++++++++++------------------
+ 1 files changed, 22 insertions(+), 18 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 7633588..8d82543 100644
+--- a/Makefile
++++ b/Makefile
+@@ -55,6 +55,8 @@
+ # DLMALLOC_SRC : build with dlmalloc, indicate the location of dlmalloc.c.
+ # DLMALLOC_THRES : should match PAGE_SIZE on every platform (default: 4096).
+ # PCREDIR : force the path to libpcre.
++# PCRE_LIB : force the lib path to libpcre (defaults to $PCREDIR/lib).
++# PCRE_INC : force the include path to libpcre ($PCREDIR/inc)
+ # IGNOREGIT : ignore GIT commit versions if set.
+ # VERSION : force haproxy version reporting.
+ # SUBVERS : add a sub-version (eg: platform, model, ...).
+@@ -436,30 +438,32 @@ DLMALLOC_THRES = 4096
+ OPTIONS_OBJS += src/dlmalloc.o
+ endif
+
+-ifneq ($(USE_PCRE),)
+-# PCREDIR is the directory hosting include/pcre.h and lib/libpcre.*. It is
+-# automatically detected but can be forced if required. Forcing it to an empty
+-# string will result in search only in the default paths.
+-ifeq ($(PCREDIR),)
++ifneq ($(USE_PCRE)$(USE_STATIC_PCRE),)
++# PCREDIR is used to automatically construct the PCRE_INC and PCRE_LIB paths,
++# by appending /include and /lib respectively. If your system does not use the
++# same sub-directories, simply force these variables instead of PCREDIR. It is
++# automatically detected but can be forced if required (for cross-compiling).
++# Forcing PCREDIR to an empty string will let the compiler use the default
++# locations.
++
+ PCREDIR := $(shell pcre-config --prefix 2>/dev/null || echo /usr/local)
++ifneq ($(PCREDIR),)
++PCRE_INC := $(PCREDIR)/include
++PCRE_LIB := $(PCREDIR)/lib
+ endif
++
+ ifeq ($(USE_STATIC_PCRE),)
+-OPTIONS_CFLAGS += -DUSE_PCRE $(if $(PCREDIR),-I$(PCREDIR)/include)
+-OPTIONS_LDFLAGS += $(if $(PCREDIR),-L$(PCREDIR)/lib) -lpcreposix -lpcre
+-endif
++# dynamic PCRE
++OPTIONS_CFLAGS += -DUSE_PCRE $(if $(PCRE_INC),-I$(PCRE_INC))
++OPTIONS_LDFLAGS += $(if $(PCRE_LIB),-L$(PCRE_LIB)) -lpcreposix -lpcre
+ BUILD_OPTIONS += $(call ignore_implicit,USE_PCRE)
+-endif
+-
+-ifneq ($(USE_STATIC_PCRE),)
+-# PCREDIR is the directory hosting include/pcre.h and lib/libpcre.*. It is
+-# automatically detected but can be forced if required.
+-ifeq ($(PCREDIR),)
+-PCREDIR := $(shell pcre-config --prefix 2>/dev/null || echo /usr/local)
+-endif
+-OPTIONS_CFLAGS += -DUSE_PCRE $(if $(PCREDIR),-I$(PCREDIR)/include)
+-OPTIONS_LDFLAGS += $(if $(PCREDIR),-L$(PCREDIR)/lib) -Wl,-Bstatic -lpcreposix -lpcre -Wl,-Bdynamic
++else
++# static PCRE
++OPTIONS_CFLAGS += -DUSE_PCRE $(if $(PCRE_INC),-I$(PCRE_INC))
++OPTIONS_LDFLAGS += $(if $(PCRE_LIB),-L$(PCRE_LIB)) -Wl,-Bstatic -lpcreposix -lpcre -Wl,-Bdynamic
+ BUILD_OPTIONS += $(call ignore_implicit,USE_STATIC_PCRE)
+ endif
++endif
+
+ # This one can be changed to look for ebtree files in an external directory
+ EBTREE_DIR := ebtree
+--
+1.7.1
+
--- /dev/null
+From baa0b0fab303179d5195e347b1254b1da2c6ff33 Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Tue, 19 Mar 2013 08:19:59 +0100
+Subject: BUG/MINOR: checks: fix a warning introduced by commit 2f61455a
+
+Commit 2f61455a fixed a TIME_WAIT issue but introduced a warning: in
+case of an error, it relies on the shutr variable, which is not
+initialized, to decide whether or not to disable lingering on the
+socket. This obviously has no impact since the socket is already dead,
+but better fix it anyway and avoid doing the setsockopt() in this case.
+---
+ src/checks.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/src/checks.c b/src/checks.c
+index 13b5c64..e586e4c 100644
+--- a/src/checks.c
++++ b/src/checks.c
+@@ -888,7 +888,7 @@ static int event_srv_chk_r(int fd)
+ /* in case of TCP only, this tells us if the connection failed */
+ if (!(s->result & SRV_CHK_ERROR))
+ set_server_check_status(s, HCHK_STATUS_SOCKERR, NULL);
+-
++ shutr = 1;
+ goto out_wakeup;
+ }
+
+--
+1.7.1
+
--- /dev/null
+From e30c9c92868af1d50b7d273056b6fbfeb82aaf73 Mon Sep 17 00:00:00 2001
+From: Willy Tarreau <w@1wt.eu>
+Date: Sat, 16 Feb 2013 23:49:04 +0100
+Subject: MEDIUM: halog: add support for counting per source address (-ic)
+
+This is the same as -uc except that instead of counting URLs, it
+counts source addresses. The reported times are request times and
+not response times.
+
+The code becomes heavily ugly: the url struct is being abused to
+store an address, and there are no more filter bits available. The
+code needs a major revamp.
+(cherry picked from commit 7cf479cc09d4e4d142e8862e2a2792385f695439)
+---
+ contrib/halog/halog.c | 124 +++++++++++++++++++++++++++++++++++++++++++++---
+ 1 files changed, 116 insertions(+), 8 deletions(-)
+
+diff --git a/contrib/halog/halog.c b/contrib/halog/halog.c
+index 61034ec..9552998 100644
+--- a/contrib/halog/halog.c
++++ b/contrib/halog/halog.c
+@@ -114,6 +114,7 @@ struct url_stat {
+ FILT_COUNT_URL_BAVG|FILT_COUNT_URL_BTOT)
+
+ #define FILT_COUNT_COOK_CODES 0x40000000
++#define FILT_COUNT_IP_COUNT 0x80000000
+
+ unsigned int filter = 0;
+ unsigned int filter_invert = 0;
+@@ -126,6 +127,7 @@ int lines_max = -1;
+ const char *fgets2(FILE *stream);
+
+ void filter_count_url(const char *accept_field, const char *time_field, struct timer **tptr);
++void filter_count_ip(const char *source_field, const char *accept_field, const char *time_field, struct timer **tptr);
+ void filter_count_srv_status(const char *accept_field, const char *time_field, struct timer **tptr);
+ void filter_count_cook_codes(const char *accept_field, const char *time_field, struct timer **tptr);
+ void filter_count_term_codes(const char *accept_field, const char *time_field, struct timer **tptr);
+@@ -140,7 +142,7 @@ void usage(FILE *output, const char *msg)
+ "%s"
+ "Usage: halog [-h|--help] for long help\n"
+ " halog [-q] [-c] [-m <lines>]\n"
+- " {-cc|-gt|-pct|-st|-tc|-srv|-u|-uc|-ue|-ua|-ut|-uao|-uto|-uba|-ubt}\n"
++ " {-cc|-gt|-pct|-st|-tc|-srv|-u|-uc|-ue|-ua|-ut|-uao|-uto|-uba|-ubt|-ic}\n"
+ " [-s <skip>] [-e|-E] [-H] [-rt|-RT <time>] [-ad <delay>] [-ac <count>]\n"
+ " [-v] [-Q|-QS] [-tcn|-TCN <termcode>] [ -hs|-HS [min][:[max]] ] < log\n"
+ "\n",
+@@ -527,7 +529,7 @@ void truncated_line(int linenum, const char *line)
+
+ int main(int argc, char **argv)
+ {
+- const char *b, *e, *p, *time_field, *accept_field;
++ const char *b, *e, *p, *time_field, *accept_field, *source_field;
+ const char *filter_term_code_name = NULL;
+ const char *output_file = NULL;
+ int f, last, err;
+@@ -657,6 +659,8 @@ int main(int argc, char **argv)
+ filter |= FILT_COUNT_URL_BAVG;
+ else if (strcmp(argv[0], "-ubt") == 0)
+ filter |= FILT_COUNT_URL_BTOT;
++ else if (strcmp(argv[0], "-ic") == 0)
++ filter |= FILT_COUNT_IP_COUNT;
+ else if (strcmp(argv[0], "-o") == 0) {
+ if (output_file)
+ die("Fatal: output file name already specified.\n");
+@@ -721,13 +725,21 @@ int main(int argc, char **argv)
+ while ((line = fgets2(stdin)) != NULL) {
+ linenum++;
+ time_field = NULL; accept_field = NULL;
++ source_field = NULL;
+
+ test = 1;
+
+ /* for any line we process, we first ensure that there is a field
+ * looking like the accept date field (beginning with a '[').
+ */
+- accept_field = field_start(line, ACCEPT_FIELD + skip_fields);
++ if (filter & FILT_COUNT_IP_COUNT) {
++ /* we need the IP first */
++ source_field = field_start(line, SOURCE_FIELD + skip_fields);
++ accept_field = field_start(source_field, ACCEPT_FIELD - SOURCE_FIELD + 1);
++ }
++ else
++ accept_field = field_start(line, ACCEPT_FIELD + skip_fields);
++
+ if (unlikely(*accept_field != '[')) {
+ parse_err++;
+ continue;
+@@ -869,8 +881,12 @@ int main(int argc, char **argv)
+
+ /************** here we process inputs *******************/
+
+- if (line_filter)
+- line_filter(accept_field, time_field, &t);
++ if (line_filter) {
++ if (filter & FILT_COUNT_IP_COUNT)
++ filter_count_ip(source_field, accept_field, time_field, &t);
++ else
++ line_filter(accept_field, time_field, &t);
++ }
+ else
+ lines_out++; /* FILT_COUNT_ONLY was used, so we're just counting lines */
+ if (lines_max >= 0 && lines_out >= lines_max)
+@@ -1047,7 +1063,7 @@ int main(int argc, char **argv)
+ n = eb32_next(n);
+ }
+ }
+- else if (filter & FILT_COUNT_URL_ANY) {
++ else if (filter & (FILT_COUNT_URL_ANY|FILT_COUNT_IP_COUNT)) {
+ struct eb_node *node, *next;
+
+ if (!(filter & FILT_COUNT_URL_ONLY)) {
+@@ -1062,7 +1078,7 @@ int main(int argc, char **argv)
+
+ ustat = container_of(node, struct url_stat, node.url.node);
+
+- if (filter & FILT_COUNT_URL_COUNT)
++ if (filter & (FILT_COUNT_URL_COUNT|FILT_COUNT_IP_COUNT))
+ ustat->node.val.key = ustat->nb_req;
+ else if (filter & FILT_COUNT_URL_ERR)
+ ustat->node.val.key = ustat->nb_err;
+@@ -1087,7 +1103,10 @@ int main(int argc, char **argv)
+ timers[0] = timers[1];
+ }
+
+- printf("#req err ttot tavg oktot okavg bavg btot url\n");
++ if (FILT_COUNT_IP_COUNT)
++ printf("#req err ttot tavg oktot okavg bavg btot src\n");
++ else
++ printf("#req err ttot tavg oktot okavg bavg btot url\n");
+
+ /* scan the tree in its reverse sorting order */
+ node = eb_last(&timers[0]);
+@@ -1410,6 +1429,95 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
+ }
+ }
+
++void filter_count_ip(const char *source_field, const char *accept_field, const char *time_field, struct timer **tptr)
++{
++ struct url_stat *ustat = NULL;
++ struct ebpt_node *ebpt_old;
++ const char *b, *e;
++ int f, err, array[5];
++ int val;
++
++ /* let's collect the response time */
++ if (!time_field) {
++ time_field = field_start(accept_field, TIME_FIELD - ACCEPT_FIELD + 1); // avg 115 ns per line
++ if (unlikely(!*time_field)) {
++ truncated_line(linenum, line);
++ return;
++ }
++ }
++
++ /* we have the field TIME_FIELD starting at <time_field>. We'll
++ * parse the 5 timers to detect errors, it takes avg 55 ns per line.
++ */
++ e = time_field; err = 0; f = 0;
++ while (!SEP(*e)) {
++ if (f == 0 || f == 4) {
++ array[f] = str2ic(e);
++ if (array[f] < 0) {
++ array[f] = -1;
++ err = 1;
++ }
++ }
++ if (++f == 5)
++ break;
++ SKIP_CHAR(e, '/');
++ }
++ if (f < 5) {
++ parse_err++;
++ return;
++ }
++
++ /* OK we have our timers in array[0], and err is >0 if at
++ * least one -1 was seen. <e> points to the first char of
++ * the last timer. Let's prepare a new node with that.
++ */
++ if (unlikely(!ustat))
++ ustat = calloc(1, sizeof(*ustat));
++
++ ustat->nb_err = err;
++ ustat->nb_req = 1;
++
++ /* use array[4] = total time in case of error */
++ ustat->total_time = (array[0] >= 0) ? array[0] : array[4];
++ ustat->total_time_ok = (array[0] >= 0) ? array[0] : 0;
++
++ e = field_start(e, BYTES_SENT_FIELD - TIME_FIELD + 1);
++ val = str2ic(e);
++ ustat->total_bytes_sent = val;
++
++ /* the source might be IPv4 or IPv6, so we always strip the port by
++ * removing the last colon.
++ */
++ b = source_field;
++ e = field_stop(b + 1);
++ while (e > b && e[-1] != ':')
++ e--;
++ *(char *)(e - 1) = '\0';
++
++ /* now instead of copying the src for a simple lookup, we'll link
++ * to it from the node we're trying to insert. If it returns a
++ * different value, it was already there. Otherwise we just have
++ * to dynamically realloc an entry using strdup(). We're using the
++ * <url> field of the node to store the source address.
++ */
++ ustat->node.url.key = (char *)b;
++ ebpt_old = ebis_insert(&timers[0], &ustat->node.url);
++
++ if (ebpt_old != &ustat->node.url) {
++ struct url_stat *ustat_old;
++ /* node was already there, let's update previous one */
++ ustat_old = container_of(ebpt_old, struct url_stat, node.url);
++ ustat_old->nb_req ++;
++ ustat_old->nb_err += ustat->nb_err;
++ ustat_old->total_time += ustat->total_time;
++ ustat_old->total_time_ok += ustat->total_time_ok;
++ ustat_old->total_bytes_sent += ustat->total_bytes_sent;
++ } else {
++ ustat->url = ustat->node.url.key = strdup(ustat->node.url.key);
++ ustat = NULL; /* node was used */
++ }
++}
++
+ void filter_graphs(const char *accept_field, const char *time_field, struct timer **tptr)
+ {
+ struct timer *t2;
+--
+1.7.1
+
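A standalone sketch of the port-stripping trick used by filter_count_ip()
above: since the source may be IPv4 or IPv6, the address is cut at the last
colon rather than the first. The function name and the sample addresses are
invented for the example.

    #include <stdio.h>
    #include <string.h>

    /* Truncate "addr:port" in place at the LAST colon so that IPv6 sources
     * such as "fe80::1:42312" keep their embedded colons.
     */
    static char *strip_port(char *addr)
    {
        char *colon = strrchr(addr, ':');

        if (colon)
            *colon = '\0';
        return addr;
    }

    int main(void)
    {
        char v4[] = "192.168.0.10:42312";
        char v6[] = "fe80::1:42312";

        printf("%s\n", strip_port(v4));  /* 192.168.0.10 */
        printf("%s\n", strip_port(v6));  /* fe80::1 */
        return 0;
    }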