}
#endif /* CONFIG_MEMCG */
-enum {
- UNDER_LIMIT,
- SOFT_LIMIT,
- OVER_LIMIT,
-};
-
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct sock;
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
+bool mem_cgroup_charge_skmem(struct cg_proto *proto, unsigned int nr_pages);
+void mem_cgroup_uncharge_skmem(struct cg_proto *proto, unsigned int nr_pages);
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
+static inline bool mem_cgroup_under_socket_pressure(struct cg_proto *proto)
+{
+ return proto->memory_pressure;
+}
+#else
+static inline bool mem_cgroup_under_socket_pressure(struct cg_proto *proto)
+{
+ return false;
+}
+#endif
#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;
if (!sk->sk_prot->memory_pressure)
return false;
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return !!sk->sk_cgrp->memory_pressure;
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+ mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+ return true;
return !!*sk->sk_prot->memory_pressure;
}
if (*memory_pressure)
*memory_pressure = 0;
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- sk->sk_cgrp->memory_pressure = 0;
}
static inline void sk_enter_memory_pressure(struct sock *sk)
if (!sk->sk_prot->enter_memory_pressure)
return;
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- sk->sk_cgrp->memory_pressure = 1;
-
sk->sk_prot->enter_memory_pressure(sk);
}
static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
- long limit = sk->sk_prot->sysctl_mem[index];
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- limit = min_t(long, limit, sk->sk_cgrp->memory_allocated.limit);
-
- return limit;
-}
-
-static inline void memcg_memory_allocated_add(struct cg_proto *prot,
- unsigned long amt,
- int *parent_status)
-{
- struct page_counter *counter;
-
- if (page_counter_try_charge(&prot->memory_allocated, amt, &counter))
- return;
-
- page_counter_charge(&prot->memory_allocated, amt);
- *parent_status = OVER_LIMIT;
-}
-
-static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
- unsigned long amt)
-{
- page_counter_uncharge(&prot->memory_allocated, amt);
+ return sk->sk_prot->sysctl_mem[index];
}
static inline long
sk_memory_allocated(const struct sock *sk)
{
- struct proto *prot = sk->sk_prot;
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return page_counter_read(&sk->sk_cgrp->memory_allocated);
-
- return atomic_long_read(prot->memory_allocated);
+ return atomic_long_read(sk->sk_prot->memory_allocated);
}
static inline long
-sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+sk_memory_allocated_add(struct sock *sk, int amt)
{
- struct proto *prot = sk->sk_prot;
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
- memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
- /* update the root cgroup regardless */
- atomic_long_add_return(amt, prot->memory_allocated);
- return page_counter_read(&sk->sk_cgrp->memory_allocated);
- }
-
- return atomic_long_add_return(amt, prot->memory_allocated);
+ return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
}
static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
- struct proto *prot = sk->sk_prot;
-
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- memcg_memory_allocated_sub(sk->sk_cgrp, amt);
-
- atomic_long_sub(amt, prot->memory_allocated);
+ atomic_long_sub(amt, sk->sk_prot->memory_allocated);
}
static inline void sk_sockets_allocated_dec(struct sock *sk)
/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return !!sk->sk_cgrp->memory_pressure;
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+ mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+ return true;
return tcp_memory_pressure;
}
}
EXPORT_SYMBOL(tcp_proto_cgroup);
+/**
+ * mem_cgroup_charge_skmem - charge socket memory
+ * @proto: proto to charge
+ * @nr_pages: number of pages to charge
+ *
+ * Charges @nr_pages to @proto. Returns %true if the charge fit within
+ * @proto's configured limit, %false if the charge had to be forced.
+ */
+bool mem_cgroup_charge_skmem(struct cg_proto *proto, unsigned int nr_pages)
+{
+ struct page_counter *counter;
+
+ if (page_counter_try_charge(&proto->memory_allocated,
+ nr_pages, &counter)) {
+ proto->memory_pressure = 0;
+ return true;
+ }
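+	/*
+	 * The limit was breached: force the charge anyway so the counter
+	 * matches the memory actually in flight, and raise the pressure
+	 * flag so socket paths back off via mem_cgroup_under_socket_pressure().
+	 */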
+ page_counter_charge(&proto->memory_allocated, nr_pages);
+ proto->memory_pressure = 1;
+ return false;
+}
+
+/**
+ * mem_cgroup_uncharge_skmem - uncharge socket memory
+ * @proto: proto to uncharge
+ * @nr_pages: number of pages to uncharge
+ */
+void mem_cgroup_uncharge_skmem(struct cg_proto *proto, unsigned int nr_pages)
+{
+ page_counter_uncharge(&proto->memory_allocated, nr_pages);
+}
+
#endif
#ifdef CONFIG_MEMCG_KMEM
struct proto *prot = sk->sk_prot;
int amt = sk_mem_pages(size);
long allocated;
- int parent_status = UNDER_LIMIT;
sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
- allocated = sk_memory_allocated_add(sk, amt, &parent_status);
+ allocated = sk_memory_allocated_add(sk, amt);
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+ !mem_cgroup_charge_skmem(sk->sk_cgrp, amt))
+ goto suppress_allocation;
/* Under limit. */
- if (parent_status == UNDER_LIMIT &&
- allocated <= sk_prot_mem_limits(sk, 0)) {
+ if (allocated <= sk_prot_mem_limits(sk, 0)) {
sk_leave_memory_pressure(sk);
return 1;
}
- /* Under pressure. (we or our parents) */
- if ((parent_status > SOFT_LIMIT) ||
- allocated > sk_prot_mem_limits(sk, 1))
+ /* Under pressure. */
+ if (allocated > sk_prot_mem_limits(sk, 1))
sk_enter_memory_pressure(sk);
- /* Over hard limit (we or our parents) */
- if ((parent_status == OVER_LIMIT) ||
- (allocated > sk_prot_mem_limits(sk, 2)))
+ /* Over hard limit. */
+ if (allocated > sk_prot_mem_limits(sk, 2))
goto suppress_allocation;
/* guarantee minimum buffer size under pressure */
sk_memory_allocated_sub(sk, amt);
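+	/* the allocation failed overall, so undo the memcg charge taken above */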
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ mem_cgroup_uncharge_skmem(sk->sk_cgrp, amt);
+
return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);
sk_memory_allocated_sub(sk, amount);
sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ mem_cgroup_uncharge_skmem(sk->sk_cgrp, amount);
+
if (sk_under_memory_pressure(sk) &&
(sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
sk_leave_memory_pressure(sk);
*/
void sk_forced_mem_schedule(struct sock *sk, int size)
{
- int amt, status;
+ int amt;
if (size <= sk->sk_forward_alloc)
return;
amt = sk_mem_pages(size);
sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
- sk_memory_allocated_add(sk, amt, &status);
+ sk_memory_allocated_add(sk, amt);
+
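+	/* forced allocation: charge the memcg but ignore whether it fit the limit */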
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ mem_cgroup_charge_skmem(sk->sk_cgrp, amt);
}
/* Send a FIN. The caller locks the socket for us.