enum rxrpc_timer_trace {
rxrpc_timer_begin,
+ rxrpc_timer_exp_ack,
+ rxrpc_timer_exp_hard,
+ rxrpc_timer_exp_idle,
+ rxrpc_timer_exp_normal,
+ rxrpc_timer_exp_ping,
+ rxrpc_timer_exp_resend,
rxrpc_timer_expired,
rxrpc_timer_init_for_reply,
rxrpc_timer_init_for_send_reply,
+ rxrpc_timer_restart,
rxrpc_timer_set_for_ack,
+ rxrpc_timer_set_for_hard,
+ rxrpc_timer_set_for_idle,
+ rxrpc_timer_set_for_normal,
rxrpc_timer_set_for_ping,
rxrpc_timer_set_for_resend,
rxrpc_timer_set_for_send,
#define rxrpc_timer_traces \
EM(rxrpc_timer_begin, "Begin ") \
EM(rxrpc_timer_expired, "*EXPR*") \
+ EM(rxrpc_timer_exp_ack, "ExpAck") \
+ EM(rxrpc_timer_exp_hard, "ExpHrd") \
+ EM(rxrpc_timer_exp_idle, "ExpIdl") \
+ EM(rxrpc_timer_exp_normal, "ExpNml") \
+ EM(rxrpc_timer_exp_ping, "ExpPng") \
+ EM(rxrpc_timer_exp_resend, "ExpRsn") \
EM(rxrpc_timer_init_for_reply, "IniRpl") \
EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
+ EM(rxrpc_timer_restart, "Restrt") \
EM(rxrpc_timer_set_for_ack, "SetAck") \
+ EM(rxrpc_timer_set_for_hard, "SetHrd") \
+ EM(rxrpc_timer_set_for_idle, "SetIdl") \
+ EM(rxrpc_timer_set_for_normal, "SetNml") \
EM(rxrpc_timer_set_for_ping, "SetPng") \
EM(rxrpc_timer_set_for_resend, "SetRTx") \
- E_(rxrpc_timer_set_for_send, "SetTx ")
+ E_(rxrpc_timer_set_for_send, "SetSnd")
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
TRACE_EVENT(rxrpc_timer,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- ktime_t now, unsigned long now_j),
+ unsigned long now),
- TP_ARGS(call, why, now, now_j),
+ TP_ARGS(call, why, now),
TP_STRUCT__entry(
__field(struct rxrpc_call *, call )
__field(enum rxrpc_timer_trace, why )
- __field_struct(ktime_t, now )
- __field_struct(ktime_t, expire_at )
- __field_struct(ktime_t, ack_at )
- __field_struct(ktime_t, resend_at )
- __field(unsigned long, now_j )
- __field(unsigned long, timer )
+ __field(long, now )
+ __field(long, ack_at )
+ __field(long, resend_at )
+ __field(long, ping_at )
+ __field(long, expect_rx_by )
+ __field(long, expect_req_by )
+ __field(long, expect_term_by )
+ __field(long, timer )
),
TP_fast_assign(
- __entry->call = call;
- __entry->why = why;
- __entry->now = now;
- __entry->expire_at = call->expire_at;
- __entry->ack_at = call->ack_at;
- __entry->resend_at = call->resend_at;
- __entry->now_j = now_j;
- __entry->timer = call->timer.expires;
+ __entry->call = call;
+ __entry->why = why;
+ __entry->now = now;
+ __entry->ack_at = call->ack_at;
+ __entry->resend_at = call->resend_at;
+ __entry->ping_at = call->ping_at;
+ __entry->expect_rx_by = call->expect_rx_by;
+ __entry->expect_req_by = call->expect_req_by;
+ __entry->expect_term_by = call->expect_term_by;
+ __entry->timer = call->timer.expires;
),
- TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+ TP_printk("c=%p %s a=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
- ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
- ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
- ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
- __entry->timer - __entry->now_j)
+ __entry->ack_at - __entry->now,
+ __entry->resend_at - __entry->now,
+ __entry->expect_rx_by - __entry->now,
+ __entry->expect_req_by - __entry->now,
+ __entry->expect_term_by - __entry->now,
+ __entry->timer - __entry->now)
);
TRACE_EVENT(rxrpc_rx_lose,
RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
+ RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */
RXRPC__SUPPORTED
};
enum rxrpc_call_event {
RXRPC_CALL_EV_ACK, /* need to generate ACK */
RXRPC_CALL_EV_ABORT, /* need to generate abort */
- RXRPC_CALL_EV_TIMER, /* Timer expired */
RXRPC_CALL_EV_RESEND, /* Tx resend required */
RXRPC_CALL_EV_PING, /* Ping send required */
+ RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */
};
/*
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
struct mutex user_mutex; /* User access mutex */
- ktime_t ack_at; /* When deferred ACK needs to happen */
- ktime_t resend_at; /* When next resend needs to happen */
- ktime_t ping_at; /* When next to send a ping */
- ktime_t expire_at; /* When the call times out */
+ unsigned long ack_at; /* When deferred ACK needs to happen */
+ unsigned long resend_at; /* When next resend needs to happen */
+ unsigned long ping_at; /* When next to send a ping */
+ unsigned long expect_rx_by; /* When we expect to get a packet by */
+ unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
+ unsigned long expect_term_by; /* When we expect call termination by */
+ u32 next_rx_timo; /* Timeout for next Rx packet (jiffies) */
+ u32 next_req_timo; /* Timeout for next Rx request packet (jiffies) */
struct timer_list timer; /* Combined event timer */
struct work_struct processor; /* Event processor */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
/*
* call_event.c
*/
-void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
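+/* Propose a new expiry time for the call timer: trace the proposal, then
+ * let timer_reduce() bring the timer forward if, and only if, the new
+ * expiry is earlier than the one currently pending.
+ */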
+static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+ unsigned long expire_at,
+ unsigned long now,
+ enum rxrpc_timer_trace why)
+{
+ trace_rxrpc_timer(call, why, now);
+ timer_reduce(&call->timer, expire_at);
+}
+
/*
* call_object.c
*/
*/
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
-extern unsigned int rxrpc_conn_idle_client_expiry;
-extern unsigned int rxrpc_conn_idle_client_fast_expiry;
+extern unsigned long rxrpc_conn_idle_client_expiry;
+extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
* misc.c
*/
extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned long rxrpc_requested_ack_delay;
+extern unsigned long rxrpc_soft_ack_delay;
+extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned int rxrpc_resend_timeout;
+extern unsigned long rxrpc_resend_timeout;
extern const s8 rxrpc_ack_priority[];
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-/*
- * Set the timer
- */
-void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- ktime_t now)
-{
- unsigned long t_j, now_j = jiffies;
- ktime_t t;
- bool queue = false;
-
- if (call->state < RXRPC_CALL_COMPLETE) {
- t = call->expire_at;
- if (!ktime_after(t, now)) {
- trace_rxrpc_timer(call, why, now, now_j);
- queue = true;
- goto out;
- }
-
- if (!ktime_after(call->resend_at, now)) {
- call->resend_at = call->expire_at;
- if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
- queue = true;
- } else if (ktime_before(call->resend_at, t)) {
- t = call->resend_at;
- }
-
- if (!ktime_after(call->ack_at, now)) {
- call->ack_at = call->expire_at;
- if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
- queue = true;
- } else if (ktime_before(call->ack_at, t)) {
- t = call->ack_at;
- }
-
- if (!ktime_after(call->ping_at, now)) {
- call->ping_at = call->expire_at;
- if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
- queue = true;
- } else if (ktime_before(call->ping_at, t)) {
- t = call->ping_at;
- }
-
- t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
- t_j += jiffies;
-
- /* We have to make sure that the calculated jiffies value falls
- * at or after the nsec value, or we may loop ceaselessly
- * because the timer times out, but we haven't reached the nsec
- * timeout yet.
- */
- t_j++;
-
- if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
- mod_timer(&call->timer, t_j);
- trace_rxrpc_timer(call, why, now, now_j);
- }
- }
-
-out:
- if (queue)
- rxrpc_queue_call(call);
-}
-
-/*
- * Set the timer
- */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- ktime_t now)
-{
- read_lock_bh(&call->state_lock);
- __rxrpc_set_timer(call, why, now);
- read_unlock_bh(&call->state_lock);
-}
-
/*
* Propose a PING ACK be sent.
*/
!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
rxrpc_queue_call(call);
} else {
- ktime_t now = ktime_get_real();
- ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);
+ unsigned long now = jiffies;
+ unsigned long ping_at = now + rxrpc_idle_ack_delay;
- if (ktime_before(ping_at, call->ping_at)) {
- call->ping_at = ping_at;
- rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
+ if (time_before(ping_at, call->ping_at)) {
+ WRITE_ONCE(call->ping_at, ping_at);
+ rxrpc_reduce_call_timer(call, ping_at, now,
+ rxrpc_timer_set_for_ping);
}
}
}
enum rxrpc_propose_ack_trace why)
{
enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
- unsigned int expiry = rxrpc_soft_ack_delay;
- ktime_t now, ack_at;
+ unsigned long now, ack_at, expiry = rxrpc_soft_ack_delay;
s8 prior = rxrpc_ack_priority[ack_reason];
/* Pings are handled specially because we don't want to accidentally
background)
rxrpc_queue_call(call);
} else {
- now = ktime_get_real();
- ack_at = ktime_add_ms(now, expiry);
- if (ktime_before(ack_at, call->ack_at)) {
- call->ack_at = ack_at;
- rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
+ now = jiffies;
+ ack_at = now + expiry;
+ if (time_before(ack_at, call->ack_at)) {
+ WRITE_ONCE(call->ack_at, ack_at);
+ rxrpc_reduce_call_timer(call, ack_at, now,
+ rxrpc_timer_set_for_ack);
}
}
/*
* Perform retransmission of NAK'd and unack'd packets.
*/
-static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
+static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
+ unsigned long resend_at;
rxrpc_seq_t cursor, seq, top;
- ktime_t max_age, oldest, ack_ts;
+ ktime_t now, max_age, oldest, ack_ts;
int ix;
u8 annotation, anno_type, retrans = 0, unacked = 0;
_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
- max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
+ now = ktime_get_real();
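+ /* rxrpc_resend_timeout is now in jiffies, so scale it back to
+ * milliseconds for the ktime-based packet-age comparison below.
+ */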
+ max_age = ktime_sub_ms(now, rxrpc_resend_timeout * 1000 / HZ);
spin_lock_bh(&call->lock);
ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
}
- call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
+ resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
+ resend_at += jiffies + rxrpc_resend_timeout;
+ WRITE_ONCE(call->resend_at, resend_at);
if (unacked)
rxrpc_congestion_timeout(call);
* retransmitting data.
*/
if (!retrans) {
- rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+ rxrpc_reduce_call_timer(call, resend_at, now_j,
+ rxrpc_timer_set_for_resend);
spin_unlock_bh(&call->lock);
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_ns(ack_ts) < call->peer->rtt)
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
- ktime_t now;
+ unsigned long now, next, t;
rxrpc_see_call(call);
goto out_put;
}
- now = ktime_get_real();
- if (ktime_before(call->expire_at, now)) {
+ /* Work out if any timeouts tripped */
+ now = jiffies;
+ t = READ_ONCE(call->expect_rx_by);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
+ set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+ }
+
+ t = READ_ONCE(call->expect_req_by);
+ if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+ time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
+ set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+ }
+
+ t = READ_ONCE(call->expect_term_by);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
+ set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+ }
+
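+ /* For the deferrable timeouts, push a tripped expiry into the far
+ * future with cmpxchg() so that a concurrent re-arm to an earlier
+ * time isn't overwritten, then raise the matching event.
+ */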
+ t = READ_ONCE(call->ack_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
+ cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_ACK, &call->events);
+ }
+
+ t = READ_ONCE(call->ping_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
+ cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_PING, &call->events);
+ }
+
+ t = READ_ONCE(call->resend_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
+ cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_RESEND, &call->events);
+ }
+
+ /* Process events */
+ if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
goto recheck_state;
goto recheck_state;
}
- rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+ /* Make sure the timer is restarted */
+ next = call->expect_rx_by;
+
+#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+
+ set(call->expect_req_by);
+ set(call->expect_term_by);
+ set(call->ack_at);
+ set(call->resend_at);
+ set(call->ping_at);
+
+#undef set
+
+ now = jiffies;
+ if (time_after_eq(now, next))
+ goto recheck_state;
+
+ rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
/* other events may have been raised since we started checking */
if (call->events && call->state < RXRPC_CALL_COMPLETE) {
_enter("%d", call->debug_id);
- if (call->state < RXRPC_CALL_COMPLETE)
- rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+ if (call->state < RXRPC_CALL_COMPLETE) {
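+ /* Just kick the event processor; rxrpc_process_call() will work
+ * out which deadline(s) actually expired.
+ */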
+ trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+ rxrpc_queue_call(call);
+ }
}
static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
atomic_set(&call->usage, 1);
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
call->tx_total_len = -1;
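+ /* Default to expecting the next Rx packet within 20 seconds and the
+ * next request DATA packet within 1 second; RXRPC_SET_CALL_TIMEOUT can
+ * override both.
+ */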
+ call->next_rx_timo = 20 * HZ;
+ call->next_req_timo = 1 * HZ;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
*/
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
- ktime_t now = ktime_get_real(), expire_at;
-
- expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
- call->expire_at = expire_at;
- call->ack_at = expire_at;
- call->ping_at = expire_at;
- call->resend_at = expire_at;
- call->timer.expires = jiffies + LONG_MAX / 2;
- rxrpc_set_timer(call, rxrpc_timer_begin, now);
+ unsigned long now = jiffies;
+ unsigned long j = now + MAX_JIFFY_OFFSET;
+
+ call->ack_at = j;
+ call->resend_at = j;
+ call->ping_at = j;
+ call->expect_rx_by = j;
+ call->expect_req_by = j;
+ call->expect_term_by = j;
+ call->timer.expires = now;
}
/*
__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
-__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
-__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
/*
* We use machine-unique IDs for our client connections.
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
struct rxrpc_ack_summary summary = { 0 };
+ unsigned long now, timo;
rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) {
spin_lock_bh(&call->lock);
call->ackr_reason = 0;
- call->resend_at = call->expire_at;
- call->ack_at = call->expire_at;
spin_unlock_bh(&call->lock);
- rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
- ktime_get_real());
+ now = jiffies;
+ timo = now + MAX_JIFFY_OFFSET;
+ WRITE_ONCE(call->resend_at, timo);
+ WRITE_ONCE(call->ack_at, timo);
+ trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
if (state >= RXRPC_CALL_COMPLETE)
return;
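+ /* While the request is still coming in, each DATA packet re-arms the
+ * expect_req_by deadline from the current time.
+ */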
+ if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ unsigned long timo = READ_ONCE(call->next_req_timo);
+ unsigned long now, expect_req_by;
+
+ if (timo) {
+ now = jiffies;
+ expect_req_by = now + timo;
+ WRITE_ONCE(call->expect_req_by, expect_req_by);
+ rxrpc_reduce_call_timer(call, expect_req_by, now,
+ rxrpc_timer_set_for_idle);
+ }
+ }
+
/* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client.
*/
struct sk_buff *skb, u16 skew)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned long timo;
_enter("%p,%p", call, skb);
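+ /* Receipt of any packet on the call grants another next_rx_timo
+ * jiffies before the expect_rx_by deadline trips.
+ */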
+ timo = READ_ONCE(call->next_rx_timo);
+ if (timo) {
+ unsigned long now = jiffies, expect_rx_by;
+
+ expect_rx_by = now + timo;
+ WRITE_ONCE(call->expect_rx_by, expect_rx_by);
+ rxrpc_reduce_call_timer(call, expect_rx_by, now,
+ rxrpc_timer_set_for_normal);
+ }
+
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
rxrpc_input_data(call, skb, skew);
*/
unsigned int rxrpc_max_backlog __read_mostly = 10;
-/*
- * Maximum lifetime of a call (in mx).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * 1000;
-
/*
* How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in ms).
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
*/
-unsigned int rxrpc_requested_ack_delay = 1;
+unsigned long rxrpc_requested_ack_delay = 1;
/*
- * How long to wait before scheduling an ACK with subtype DELAY (in ms).
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
*
* We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend.
*/
-unsigned int rxrpc_soft_ack_delay = 1 * 1000;
+unsigned long rxrpc_soft_ack_delay = HZ;
/*
- * How long to wait before scheduling an ACK with subtype IDLE (in ms).
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
*
* We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE
- * ACK let the other end know that it can free up its Tx buffer space.
+ * ACK to let the other end know that it can free up its Tx buffer space.
*/
-unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
+unsigned long rxrpc_idle_ack_delay = HZ / 2;
/*
* Receive window size in packets. This indicates the maximum number of
/*
- * Time till packet resend (in milliseconds).
+ * Time till packet resend (in jiffies).
*/
-unsigned int rxrpc_resend_timeout = 4 * 1000;
+unsigned long rxrpc_resend_timeout = 4 * HZ;
const s8 rxrpc_ack_priority[] = {
[0] = 0,
case RXRPC_CALL_SERVER_RECV_REQUEST:
call->tx_phase = true;
call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
- call->ack_at = call->expire_at;
+ call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
write_unlock_bh(&call->state_lock);
rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
rxrpc_propose_ack_processing_op);
rxrpc_notify_end_tx_t notify_end_tx)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned long now;
rxrpc_seq_t seq = sp->hdr.seq;
int ret, ix;
u8 annotation = RXRPC_TX_ANNO_UNACK;
break;
case RXRPC_CALL_SERVER_ACK_REQUEST:
call->state = RXRPC_CALL_SERVER_SEND_REPLY;
- call->ack_at = call->expire_at;
+ now = jiffies;
+ WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
- __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
- ktime_get_real());
+ trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
if (!last)
break;
/* Fall through */
_debug("need instant resend %d", ret);
rxrpc_instant_resend(call, ix);
} else {
- ktime_t now = ktime_get_real(), resend_at;
+ unsigned long now = jiffies, resend_at;
- resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
-
- if (ktime_before(resend_at, call->resend_at)) {
- call->resend_at = resend_at;
- rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
- }
+ resend_at = now + rxrpc_resend_timeout;
+ WRITE_ONCE(call->resend_at, resend_at);
+ rxrpc_reduce_call_timer(call, resend_at, now,
+ rxrpc_timer_set_for_send);
}
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
return -EINVAL;
break;
+ case RXRPC_SET_CALL_TIMEOUT:
+ if (len & 3 || len < 4 || len > 12)
+ return -EINVAL;
+ memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
+ p->call.nr_timeouts = len / 4;
+ if (p->call.timeouts.hard > INT_MAX / HZ)
+ return -ERANGE;
+ if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
+ return -ERANGE;
+ if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
+ return -ERANGE;
+ break;
+
default:
return -EINVAL;
}
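For reference, userspace arms these timeouts by attaching a control message to the sendmsg() that initiates the call. The sketch below is illustrative rather than part of the patch: it assumes the RXRPC_SET_CALL_TIMEOUT type added above, defines SOL_RXRPC (272, from include/linux/socket.h) in case the libc headers lack it, and uses a hypothetical helper name; per the range checks above, the hard timeout is given in seconds while the idle and normal timeouts are given in milliseconds.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272
#endif

/* Pack one RXRPC_SET_CALL_TIMEOUT cmsg: { hard (s), idle (ms), normal (ms) }.
 * buf must provide at least CMSG_SPACE(sizeof(uint32_t) * 3) bytes.
 */
static void set_call_timeouts(struct msghdr *msg, void *buf,
			      uint32_t hard, uint32_t idle, uint32_t normal)
{
	uint32_t timeouts[3] = { hard, idle, normal };
	struct cmsghdr *cmsg;

	msg->msg_control = buf;
	msg->msg_controllen = CMSG_SPACE(sizeof(timeouts));

	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_SET_CALL_TIMEOUT;
	cmsg->cmsg_len = CMSG_LEN(sizeof(timeouts));
	memcpy(CMSG_DATA(cmsg), timeouts, sizeof(timeouts));
}

Truncating the payload to 8 or 4 bytes sets only the first two values or just the hard timeout, matching the len / 4 calculation in the parser above.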
{
enum rxrpc_call_state state;
struct rxrpc_call *call;
+ unsigned long now, j;
int ret;
struct rxrpc_send_params p = {
.call.tx_total_len = -1,
.call.user_call_ID = 0,
+ .call.nr_timeouts = 0,
.abort_code = 0,
.command = RXRPC_CMD_SEND_DATA,
.exclusive = false,
}
}
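+ /* Apply any user-requested timeouts. The idle and normal values are
+ * given in milliseconds; a nonzero value that would round down to zero
+ * jiffies is clamped to one jiffy so that it isn't taken as "no timeout".
+ */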
+ switch (p.call.nr_timeouts) {
+ case 3:
+ j = msecs_to_jiffies(p.call.timeouts.normal);
+ if (p.call.timeouts.normal > 0 && j == 0)
+ j = 1;
+ WRITE_ONCE(call->next_rx_timo, j);
+ /* Fall through */
+ case 2:
+ j = msecs_to_jiffies(p.call.timeouts.idle);
+ if (p.call.timeouts.idle > 0 && j == 0)
+ j = 1;
+ WRITE_ONCE(call->next_req_timo, j);
+ /* Fall through */
+ case 1:
+ if (p.call.timeouts.hard > 0) {
+ j = p.call.timeouts.hard * HZ;
+ now = jiffies;
+ j += now;
+ WRITE_ONCE(call->expect_term_by, j);
+ rxrpc_reduce_call_timer(call, j, now,
+ rxrpc_timer_set_for_hard);
+ }
+ break;
+ }
+
state = READ_ONCE(call->state);
_debug("CALL %d USR %lx ST %d on CONN %p",
call->debug_id, call->user_call_ID, state, call->conn);
static const unsigned int thirtytwo = 32;
static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
+static const unsigned long one_jiffy = 1;
+static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
/*
* RxRPC operating parameters.
* information on the individual parameters.
*/
static struct ctl_table rxrpc_sysctl_table[] = {
- /* Values measured in milliseconds */
+ /* Values measured in milliseconds but used in jiffies */
{
.procname = "req_ack_delay",
.data = &rxrpc_requested_ack_delay,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&zero,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_ack_delay",
.data = &rxrpc_idle_ack_delay,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
- },
- {
- .procname = "resend_timeout",
- .data = &rxrpc_resend_timeout,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_conn_expiry",
.data = &rxrpc_conn_idle_client_expiry,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_conn_fast_expiry",
.data = &rxrpc_conn_idle_client_fast_expiry,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
-
- /* Values measured in seconds but used in jiffies */
{
- .procname = "max_call_lifetime",
- .data = &rxrpc_max_call_lifetime,
- .maxlen = sizeof(unsigned int),
+ .procname = "resend_timeout",
+ .data = &rxrpc_resend_timeout,
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
/* Non-time values */