PKG_NAME:=haproxy
PKG_VERSION:=1.8.8
-PKG_RELEASE:=02
+PKG_RELEASE:=03
PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/
--- /dev/null
+commit 80e179128cfd78d95cdebf7195fd21299e7931b6
+Author: Willy Tarreau <w@1wt.eu>
+Date: Sat Apr 28 07:18:15 2018 +0200
+
+ BUG/MINOR: config: disable http-reuse on TCP proxies
+
+ Louis Chanouha reported an inappropriate warning emitted when
+ http-reuse is present in a defaults section while a TCP proxy
+ accidentally inherits it and finds a conflict with other options like
+ the use of the PROXY protocol. To fix this, the patch removes the
+ http-reuse option from TCP proxies.
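+
+ A minimal C sketch of the cleanup (the flag names appear in the hunk
+ below, but the values and surrounding code are illustrative stand-ins,
+ not HAProxy's real definitions):
+
+   #include <stdio.h>
+
+   #define PR_MODE_HTTP    1
+   #define PR_MODE_TCP     0
+   #define PR_O_REUSE_MASK 0x00000003 /* hypothetical bit layout */
+   #define PR_O_REUSE_NEVR 0x00000000
+   #define PR_O_REUSE_ALWS 0x00000003
+
+   int main(void)
+   {
+       int mode = PR_MODE_TCP;                 /* TCP proxy...            */
+       unsigned int options = PR_O_REUSE_ALWS; /* ...inherited http-reuse */
+
+       if (mode != PR_MODE_HTTP &&
+           (options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR)
+           options &= ~PR_O_REUSE_MASK;        /* quietly drop the option */
+
+       printf("options=%#x\n", options);       /* 0: no conflict warning  */
+       return 0;
+   }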
+
+ This fix needs to be backported to 1.8, 1.7 and possibly 1.6.
+
+ (cherry picked from commit 46deab6e64bfda7211b7c3199ad01f136141c86f)
+ Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/cfgparse.c b/src/cfgparse.c
+index 5a460381..63d2de58 100644
+--- a/src/cfgparse.c
++++ b/src/cfgparse.c
+@@ -8702,6 +8702,9 @@ out_uri_auth_compat:
+ }
+ #endif
+
++ if ((curproxy->mode != PR_MODE_HTTP) && (curproxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR)
++ curproxy->options &= ~PR_O_REUSE_MASK;
++
+ if ((curproxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) {
+ if ((curproxy->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_CLI ||
+ (curproxy->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_CIP ||
--- /dev/null
+commit edb5a1efd22eb9918574d962640cd2ae3bb45ad3
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date: Wed May 2 12:12:45 2018 +0200
+
+ BUG/MINOR: checks: Fix check->health computation for flapping servers
+
+ This patch fixes an old bug introduced in commit 7b1d47ce ("MAJOR:
+ checks: move health checks changes to set_server_check_status()").
+ When a DOWN server is flapping, check->health is incremented every time
+ a check succeeds, but on a failure it is only decremented when it is
+ higher than the rise value. So if a single check succeeds for a DOWN
+ server, check->health will remain set to 1 for all subsequent failing
+ checks.
+
+ At first glance this does not seem that terrible, because the server
+ remains DOWN. But it is reported in the transitional state "DOWN
+ server, going up", and it will remain in this state until it is UP
+ again. There is also an insidious side effect: if a DOWN server flaps
+ from time to time, it will eventually be considered UP after a single
+ successful check, regardless of the rise threshold, because
+ check->health is slowly increased and never decreased.
+
+ To fix the bug, we just need to reset check->health to 0 when a check
+ fails for a DOWN server. To do so, it is enough to relax the condition
+ used to handle a failure in the function set_server_check_status.
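+
+ A toy model of the counter (not the full check state machine), showing
+ how the old condition left check->health stuck at 1:
+
+   #include <stdio.h>
+
+   int main(void)
+   {
+       int health = 0, rise = 3;
+       int results[] = { 1, 0, 0, 0 }; /* one good check, then failures */
+
+       for (int i = 0; i < 4; i++) {
+           if (results[i]) {
+               if (health < rise)
+                   health++;        /* DOWN server climbing towards UP  */
+           } else if (health > 0) { /* old code tested health >= rise,  */
+               health--;            /* never true for a DOWN server, so */
+           }                        /* health used to stay stuck at 1   */
+           printf("check %d: health=%d\n", i, health);
+       }
+       return 0;
+   }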
+
+ This patch must be backported to haproxy 1.5 and newer.
+
+ (cherry picked from commit b119a79fc336f2b6074de1c3113b1682c717985c)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/checks.c b/src/checks.c
+index 80a9c70d..d07a82f8 100644
+--- a/src/checks.c
++++ b/src/checks.c
+@@ -243,7 +243,7 @@ static void set_server_check_status(struct check *check, short status, const cha
+ */
+ if ((!(check->state & CHK_ST_AGENT) ||
+ (check->status >= HCHK_STATUS_L57DATA)) &&
+- (check->health >= check->rise)) {
++ (check->health > 0)) {
+ HA_ATOMIC_ADD(&s->counters.failed_checks, 1);
+ report = 1;
+ check->health--;
--- /dev/null
+commit 830324444e57c042666b17ac4584352cca85dafd
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date: Wed May 2 16:58:40 2018 +0200
+
+ BUG/MEDIUM: threads: Fix the sync point for more than 32 threads
+
+ In the sync point, to know if a thread has requested a synchronization,
+ we call the function thread_need_sync(). It should return 1 if yes and
+ 0 otherwise, and it is declared to return a signed integer.
+
+ But internally, instead of returning 0 or 1, it returns 0 or tid_bit
+ (threads_want_sync & tid_bit), so tid_bit is cast to an integer. For
+ the first 32 threads this is fine, because we always check whether
+ thread_need_sync() returns something other than 0. But it is a problem
+ when HAProxy is started with more than 32 threads, because for threads
+ 33 to 64 (tid 32 to 63), tid_bit cast to an integer evaluates to 0. So
+ the sync point does not work for more than 32 threads.
+
+ Now the function thread_need_sync() respects its contract and returns
+ 0 or 1. The function thread_no_sync() has also been updated to avoid
+ any ambiguity.
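+
+ A self-contained demonstration of the truncation on an LP64 platform
+ (64-bit unsigned long, 32-bit int):
+
+   #include <stdio.h>
+
+   int main(void)
+   {
+       /* tid 40: representable only because tid_bit is an unsigned long */
+       unsigned long tid_bit = 1UL << 40;
+       unsigned long threads_want_sync = tid_bit; /* a sync is requested */
+
+       int broken = threads_want_sync & tid_bit;          /* truncated: 0 */
+       int fixed  = (threads_want_sync & tid_bit) != 0UL; /* compared:  1 */
+
+       printf("broken=%d fixed=%d\n", broken, fixed);
+       return 0;
+   }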
+
+ This patch must be backported to HAProxy 1.8.
+
+ (cherry picked from commit 148b16e1ceb819dfcef4c45828121d9cd7474b35)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/hathreads.c b/src/hathreads.c
+index daf226ce..944a0d5b 100644
+--- a/src/hathreads.c
++++ b/src/hathreads.c
+@@ -85,7 +85,7 @@ void thread_want_sync()
+ /* Returns 1 if no thread has requested a sync. Otherwise, it returns 0. */
+ int thread_no_sync()
+ {
+- return (threads_want_sync == 0);
++ return (threads_want_sync == 0UL);
+ }
+
+ /* Returns 1 if the current thread has requested a sync. Otherwise, it returns
+@@ -93,7 +93,7 @@ int thread_no_sync()
+ */
+ int thread_need_sync()
+ {
+- return (threads_want_sync & tid_bit);
++ return ((threads_want_sync & tid_bit) != 0UL);
+ }
+
+ /* Thread barrier. Synchronizes all threads at the barrier referenced by
--- /dev/null
+commit 335bc7b74eee84f0a3bcb615cadd23fe01d1336c
+Author: PiBa-NL <PiBa.NL.dev@gmail.com>
+Date: Wed May 2 22:27:14 2018 +0200
+
+ BUG/MINOR: lua: Put tasks to sleep when waiting for data
+
+ If a lua socket is waiting for data, it currently spins at 100% CPU
+ usage. This is because the TICK_ETERNITY returned by the socket is
+ ignored when setting the 'expire' time of the task.
+
+ Fixed by removing the check for yields that return TICK_ETERNITY.
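+
+ A minimal sketch of why the old check spun (TICK_ETERNITY is 0 in
+ HAProxy's tick API and means "no deadline"; the rest is simplified):
+
+   #include <stdio.h>
+
+   #define TICK_ETERNITY 0U
+
+   int main(void)
+   {
+       unsigned int wake_time = TICK_ETERNITY; /* socket: no data yet  */
+       unsigned int expire = 42;               /* stale, past deadline */
+
+       if (wake_time != TICK_ETERNITY)         /* old code: skipped    */
+           expire = wake_time;
+       printf("old: expire=%u (task keeps waking)\n", expire);
+
+       expire = wake_time;                     /* fix: always copy     */
+       printf("new: expire=%u (task sleeps until woken)\n", expire);
+       return 0;
+   }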
+
+ This should be backported to at least 1.8.
+
+ (cherry picked from commit fe971b35aeca9994f3823112c783aa796e74075a)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/hlua.c b/src/hlua.c
+index bd0b87e3..0100e7cf 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -5536,8 +5536,7 @@ static struct task *hlua_process_task(struct task *task)
+
+ case HLUA_E_AGAIN: /* co process or timeout wake me later. */
+ notification_gc(&hlua->com);
+- if (hlua->wake_time != TICK_ETERNITY)
+- task->expire = hlua->wake_time;
++ task->expire = hlua->wake_time;
+ break;
+
+ /* finished with error. */
--- /dev/null
+commit 016feef5483397491af3242162934d9e9dbc6263
+Author: Patrick Hemmer <haproxy@stormcloud9.net>
+Date: Tue May 1 21:30:41 2018 -0400
+
+ DOC/MINOR: clean up LUA documentation re: servers & array/table.
+
+ * A few typos
+ * Fix definitions of values which are tables, not arrays.
+ * Consistent US English naming for "server" instead of "serveur".
+
+ [tfo: should be backported to 1.6 and higher]
+
+ (cherry picked from commit c6a1d711a4d47d68611aa28adecdadba96221bde)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/doc/lua-api/index.rst b/doc/lua-api/index.rst
+index e7aa425d..2d210945 100644
+--- a/doc/lua-api/index.rst
++++ b/doc/lua-api/index.rst
+@@ -169,9 +169,9 @@ Core class
+
+ **context**: task, action, sample-fetch, converter
+
+- This attribute is an array of declared proxies (frontend and backends). Each
+- proxy give an access to his list of listeners and servers. Each entry is of
+- type :ref:`proxy_class`
++ This attribute is a table of declared proxies (frontends and backends).
++ Each proxy gives access to its list of listeners and servers. The table
++ is indexed by proxy name, and each entry is of type :ref:`proxy_class`.
+
+ Warning, if you are declared frontend and backend with the same name, only one
+ of these are listed.
+@@ -183,12 +183,9 @@ Core class
+
+ **context**: task, action, sample-fetch, converter
+
+- This attribute is an array of declared proxies with backend capability. Each
+- proxy give an access to his list of listeners and servers. Each entry is of
+- type :ref:`proxy_class`
+-
+- Warning, if you are declared frontend and backend with the same name, only one
+- of these are listed.
++ This attribute is a table of declared proxies with backend capability.
++ Each proxy gives access to its list of listeners and servers. The table
++ is indexed by the backend name, and each entry is of type :ref:`proxy_class`.
+
+ :see: :js:attr:`core.proxies`
+ :see: :js:attr:`core.frontends`
+@@ -197,12 +194,9 @@ Core class
+
+ **context**: task, action, sample-fetch, converter
+
+- This attribute is an array of declared proxies with frontend capability. Each
+- proxy give an access to his list of listeners and servers. Each entry is of
+- type :ref:`proxy_class`
+-
+- Warning, if you are declared frontend and backend with the same name, only one
+- of these are listed.
++ This attribute is a table of declared proxies with frontend capability.
++ Each proxy gives access to its list of listeners and servers. The table
++ is indexed by the frontend name, and each entry is of type :ref:`proxy_class`.
+
+ :see: :js:attr:`core.proxies`
+ :see: :js:attr:`core.backends`
+@@ -336,7 +330,7 @@ Core class
+ Lua execution or resume, so two consecutive call to the function "now" will
+ probably returns the same result.
+
+- :returns: an array which contains two entries "sec" and "usec". "sec"
++ :returns: a table which contains two entries "sec" and "usec". "sec"
+ contains the current at the epoch format, and "usec" contains the
+ current microseconds.
+
+@@ -439,9 +433,12 @@ Core class
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+- proxies is an array containing the list of all proxies declared in the
+- configuration file. Each entry of the proxies array is an object of type
+- :ref:`proxy_class`
++ proxies is a table containing the list of all proxies declared in the
++ configuration file. The table is indexed by the proxy name, and each entry
++ of the proxies table is an object of type :ref:`proxy_class`.
++
++ Warning: if you have declared a frontend and a backend with the same
++ name, only one of them is listed.
+
+ .. js:function:: core.register_action(name, actions, func [, nb_args])
+
+@@ -852,13 +849,14 @@ Proxy class
+
+ .. js:attribute:: Proxy.servers
+
+- Contain an array with the attached servers. Each server entry is an object of
+- type :ref:`server_class`.
++ Contains a table with the attached servers. The table is indexed by
++ server name, and each server entry is an object of type :ref:`server_class`.
+
+ .. js:attribute:: Proxy.listeners
+
+- Contain an array with the attached listeners. Each listeners entry is an
+- object of type :ref:`listener_class`.
++ Contains a table with the attached listeners. The table is indexed by
++ listener name, and each listener entry is an object of type
++ :ref:`listener_class`.
+
+ .. js:function:: Proxy.pause(px)
+
+@@ -908,21 +906,25 @@ Proxy class
+
+ .. js:function:: Proxy.get_stats(px)
+
+- Returns an array containg the proxy statistics. The statistics returned are
++ Returns a table containing the proxy statistics. The statistics returned are
+ not the same if the proxy is frontend or a backend.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+- :returns: a key/value array containing stats
++ :returns: a key/value table containing stats
+
+ .. _server_class:
+
+ Server class
+ ============
+
++.. js:class:: Server
++
++ This class provides a way for manipulating servers and retrieving information.
++
+ .. js:function:: Server.is_draining(sv)
+
+- Return true if the server is currently draining stiky connections.
++ Return true if the server is currently draining sticky connections.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+@@ -930,7 +932,7 @@ Server class
+
+ .. js:function:: Server.set_weight(sv, weight)
+
+- Dynamically change the weight of the serveur. See the management socket
++ Dynamically change the weight of the server. See the management socket
+ documentation for more information about the format of the string.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+@@ -939,7 +941,7 @@ Server class
+
+ .. js:function:: Server.get_weight(sv)
+
+- This function returns an integer representing the serveur weight.
++ This function returns an integer representing the server weight.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+@@ -947,16 +949,16 @@ Server class
+
+ .. js:function:: Server.set_addr(sv, addr)
+
+- Dynamically change the address of the serveur. See the management socket
++ Dynamically change the address of the server. See the management socket
+ documentation for more information about the format of the string.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+- :param string weight: A string describing the server address.
++ :param string addr: A string describing the server address.
+
+ .. js:function:: Server.get_addr(sv)
+
+- Returns a string describing the address of the serveur.
++ Returns a string describing the address of the server.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+@@ -968,7 +970,7 @@ Server class
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+- :returns: a key/value array containing stats
++ :returns: a key/value table containing stats
+
+ .. js:function:: Server.shut_sess(sv)
+
+@@ -1085,7 +1087,7 @@ Listener class
+
+ :param class_listener ls: A :ref:`listener_class` which indicates the
+ manipulated listener.
+- :returns: a key/value array containing stats
++ :returns: a key/value table containing stats
+
+ .. _concat_class:
+
+@@ -1169,7 +1171,7 @@ Fetches class
+ usage. they are the chapters 7.3.2 to 7.3.6.
+
+ **warning** some sample fetches are not available in some context. These
+- limitations are specified in this documentation when theire useful.
++ limitations are specified in this documentation where they apply.
+
+ :see: :js:attr:`TXN.f`
+ :see: :js:attr:`TXN.sf`
+@@ -1345,13 +1347,13 @@ HTTP class
+
+ .. js:function:: HTTP.req_get_headers(http)
+
+- Returns an array containing all the request headers.
++ Returns a table containing all the request headers.
+
+ :param class_http http: The related http object.
+- :returns: array of headers.
++ :returns: table of headers.
+ :see: :js:func:`HTTP.res_get_headers`
+
+- This is the form of the returned array:
++ This is the form of the returned table:
+
+ .. code-block:: lua
+
+@@ -1366,13 +1368,13 @@ HTTP class
+
+ .. js:function:: HTTP.res_get_headers(http)
+
+- Returns an array containing all the response headers.
++ Returns a table containing all the response headers.
+
+ :param class_http http: The related http object.
+- :returns: array of headers.
++ :returns: table of headers.
+ :see: :js:func:`HTTP.req_get_headers`
+
+- This is the form of the returned array:
++ This is the form of the returned table:
+
+ .. code-block:: lua
+
+@@ -2210,12 +2212,12 @@ AppletHTTP class
+
+ .. js:attribute:: AppletHTTP.headers
+
+- :returns: array
++ :returns: table
+
+- The attribute headers returns an array containing the HTTP
++ The attribute headers returns a table containing the HTTP
+ headers. The header names are always in lower case. As the header name can be
+ encountered more than once in each request, the value is indexed with 0 as
+- first index value. The array have this form:
++ first index value. The table has this form:
+
+ .. code-block:: lua
+
--- /dev/null
+commit b2219ae216a141acdf0e2a3f67d2c85aee2a2bc2
+Author: Dragan Dosen <ddosen@haproxy.com>
+Date: Fri May 4 16:27:15 2018 +0200
+
+ BUG/MINOR: map: correctly track reference to the last ref_elt being dumped
+
+ The bug was introduced in commit 8d85aa4 ("BUG/MAJOR: map: fix
+ segfault during 'show map/acl' on cli").
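+
+ The appctx context is a union, so unlinking the back-reference through
+ the wrong member touches the wrong bytes. A reduced sketch (the layout
+ is illustrative, not HAProxy's real appctx):
+
+   #include <stdio.h>
+   #include <stddef.h>
+
+   struct bref { void *users; };
+
+   union ctx {
+       struct { int uid; struct bref bref; } sess;
+       struct { struct bref bref; }          map;
+   };
+
+   int main(void)
+   {
+       /* The two bref members live at different offsets, so touching
+        * ctx.sess.bref when the applet filled ctx.map corrupts memory. */
+       printf("sess.bref at %zu, map.bref at %zu\n",
+              offsetof(union ctx, sess.bref),
+              offsetof(union ctx, map.bref));
+       return 0;
+   }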
+
+ This patch should be backported to 1.8, 1.7 and 1.6.
+
+ (cherry picked from commit 336a11f75571ad46f74a7c6247c13ed44f95da93)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/map.c b/src/map.c
+index f40e4394..a9a1e53c 100644
+--- a/src/map.c
++++ b/src/map.c
+@@ -307,9 +307,9 @@ static int cli_io_handler_pat_list(struct appctx *appctx)
+ * reference to the last ref_elt being dumped.
+ */
+ if (appctx->st2 == STAT_ST_LIST) {
+- if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) {
+- LIST_DEL(&appctx->ctx.sess.bref.users);
+- LIST_INIT(&appctx->ctx.sess.bref.users);
++ if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) {
++ LIST_DEL(&appctx->ctx.map.bref.users);
++ LIST_INIT(&appctx->ctx.map.bref.users);
+ }
+ }
+ return 1;
--- /dev/null
+commit a0f0db361978154474d76028183647d5991f3b5c
+Author: Olivier Houchard <ohouchard@haproxy.com>
+Date: Fri May 4 15:46:16 2018 +0200
+
+ BUG/MEDIUM: task: Don't free a task that is about to be run.
+
+ While running a task, we may try to delete and free another task that
+ is about to be run, because it is part of the local tasks list or
+ because rq_next points to it.
+ So, instead of freeing such a task immediately, flag any task in the
+ local tasks list for deletion by setting t->process to NULL so that it
+ is freed rather than run, and turn rq_next back into a global,
+ thread-local variable that is updated if we attempt to delete that
+ task.
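+
+ A toy model of the deferred-free pattern (the task struct is reduced
+ to the one relevant field; names are illustrative):
+
+   #include <stdlib.h>
+
+   struct task {
+       struct task *(*process)(struct task *t);
+   };
+
+   /* Deleting a task the scheduler already picked for the current run:
+    * only mark it dead, so the run loop frees it instead of running it
+    * and then touching freed memory. */
+   void task_delete(struct task *t, int picked_for_current_run)
+   {
+       if (picked_for_current_run)
+           t->process = NULL; /* run loop: if (!t->process) free it */
+       else
+           free(t);           /* safe: nothing else references it   */
+   }
+
+   int main(void)
+   {
+       struct task *t = calloc(1, sizeof(*t));
+       task_delete(t, 1);   /* marked dead...                  */
+       if (t && !t->process)
+           free(t);         /* ...and reaped by the "run loop" */
+       return 0;
+   }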
+
+ Many thanks to PiBa-NL for reporting this and analysing the problem.
+
+ This should be backported to 1.8.
+
+ (cherry picked from commit 9b36cb4a414c22e13d344afbbe70684e9f2f1d49)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/include/proto/task.h b/include/proto/task.h
+index cbc1a907..c1c4c07e 100644
+--- a/include/proto/task.h
++++ b/include/proto/task.h
+@@ -90,6 +90,8 @@ extern unsigned int nb_tasks_cur;
+ extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
+ extern struct pool_head *pool_head_task;
+ extern struct pool_head *pool_head_notification;
++extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
++extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */
+
+ __decl_hathreads(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */
+ __decl_hathreads(extern HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */
+@@ -177,8 +179,11 @@ static inline struct task *__task_unlink_rq(struct task *t)
+ static inline struct task *task_unlink_rq(struct task *t)
+ {
+ HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+- if (likely(task_in_rq(t)))
++ if (likely(task_in_rq(t))) {
++ if (&t->rq == rq_next)
++ rq_next = eb32sc_next(rq_next, tid_bit);
+ __task_unlink_rq(t);
++ }
+ HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+ return t;
+ }
+@@ -230,7 +235,7 @@ static inline struct task *task_new(unsigned long thread_mask)
+ * Free a task. Its context must have been freed since it will be lost.
+ * The task count is decremented.
+ */
+-static inline void task_free(struct task *t)
++static inline void __task_free(struct task *t)
+ {
+ pool_free(pool_head_task, t);
+ if (unlikely(stopping))
+@@ -238,6 +243,18 @@ static inline void task_free(struct task *t)
+ HA_ATOMIC_SUB(&nb_tasks, 1);
+ }
+
++static inline void task_free(struct task *t)
++{
++ /* There's no need to protect t->state with a lock, as the task
++ * has to run on the current thread.
++ */
++ if (t == curr_task || !(t->state & TASK_RUNNING))
++ __task_free(t);
++ else
++ t->process = NULL;
++}
++
++
+ /* Place <task> into the wait queue, where it may already be. If the expiration
+ * timer is infinite, do nothing and rely on wake_expired_task to clean up.
+ */
+diff --git a/src/task.c b/src/task.c
+index fd9acf66..3d021bb4 100644
+--- a/src/task.c
++++ b/src/task.c
+@@ -39,6 +39,7 @@ unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
+ unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
+
+ THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */
++THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */
+
+ __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
+ __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
+@@ -186,7 +187,6 @@ void process_runnable_tasks()
+ struct task *t;
+ int i;
+ int max_processed;
+- struct eb32sc_node *rq_next;
+ struct task *local_tasks[16];
+ int local_tasks_count;
+ int final_tasks_count;
+@@ -227,8 +227,14 @@ void process_runnable_tasks()
+ */
+ if (likely(t->process == process_stream))
+ t = process_stream(t);
+- else
+- t = t->process(t);
++ else {
++ if (t->process != NULL)
++ t = t->process(t);
++ else {
++ __task_free(t);
++ t = NULL;
++ }
++ }
+ curr_task = NULL;
+
+ if (likely(t != NULL)) {
+@@ -309,8 +315,14 @@ void process_runnable_tasks()
+ curr_task = t;
+ if (likely(t->process == process_stream))
+ t = process_stream(t);
+- else
+- t = t->process(t);
++ else {
++ if (t->process != NULL)
++ t = t->process(t);
++ else {
++ __task_free(t);
++ t = NULL;
++ }
++ }
+ curr_task = NULL;
+ if (t)
+ local_tasks[final_tasks_count++] = t;
--- /dev/null
+commit 52ec3578c3ddc688ae14da3cd3e7e351494603d8
+Author: PiBa-NL <PiBa.NL.dev@gmail.com>
+Date: Sat May 5 23:51:42 2018 +0200
+
+ BUG/MINOR: lua: schedule socket task upon lua connect()
+
+ Parameters like the server address, port and timeout should be set
+ before the process_stream task is called, to avoid the stream being
+ 'closed' before it got initialized properly. This is most clearly
+ visible when running with tune.lua.forced-yield=1. So the task should
+ not be scheduled when the lua socket is created, but when connect is
+ called; otherwise the error "socket: not yet initialised, you can't
+ set timeouts." would appear.
+
+ The code below, for example, also shows this issue, as the sleep will
+ yield the lua code:
+ local con = core.tcp()
+ core.sleep(1)
+ con:settimeout(10)
+
+ (cherry picked from commit 706d5ee0c366787536213ccd6dea264d20b76a22)
+ [wt: must be backported to 1.7 and 1.6 as well with a different patch,
+ see https://www.mail-archive.com/haproxy@formilux.org/msg29924.html]
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/hlua.c b/src/hlua.c
+index 0100e7cf..5cc918c9 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -2415,6 +2415,10 @@ __LJMP static int hlua_socket_connect(struct lua_State *L)
+ WILL_LJMP(luaL_error(L, "out of memory"));
+ }
+ xref_unlock(&socket->xref, peer);
++
++ task_wakeup(s->task, TASK_WOKEN_INIT);
++ /* Return yield waiting for connection. */
++
+ WILL_LJMP(hlua_yieldk(L, 0, 0, hlua_socket_connect_yield, TICK_ETERNITY, 0));
+
+ return 0;
+@@ -2566,8 +2570,6 @@ __LJMP static int hlua_socket_new(lua_State *L)
+ strm->flags |= SF_DIRECT | SF_ASSIGNED | SF_ADDR_SET | SF_BE_ASSIGNED;
+ strm->target = &socket_tcp.obj_type;
+
+- task_wakeup(strm->task, TASK_WOKEN_INIT);
+- /* Return yield waiting for connection. */
+ return 1;
+
+ out_fail_stream: