};
struct mem_cgroup_lru_info {
- struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
+ struct mem_cgroup_per_node *nodeinfo[0];
};
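
The hunk above is the heart of the change: the fixed MAX_NUMNODES-sized
pointer array becomes a zero-length array, GNU C's spelling of a flexible
array member, so the tail can be sized at boot from nr_node_ids instead of
the compile-time ceiling. Below is a minimal userspace sketch of the
pattern with hypothetical names (node_info, container, container_alloc);
it inlines the per-node elements for brevity, whereas the kernel keeps
pointers so each node's data can be allocated node-locally.

    #include <stdlib.h>

    struct node_info { long stats[4]; };

    struct container {
        int nr_nodes;
        /* Zero-length array: contributes nothing to
         * sizeof(struct container); the elements are carved out of
         * the same allocation as the header, below. */
        struct node_info nodeinfo[0];
    };

    static struct container *container_alloc(int nr_nodes)
    {
        /* One allocation covers the header plus nr_nodes elements. */
        struct container *c = calloc(1, sizeof(*c) +
                        nr_nodes * sizeof(struct node_info));

        if (c)
            c->nr_nodes = nr_nodes;
        return c;
    }
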
/*
* the counter to account for kernel memory usage.
*/
struct res_counter kmem;
- /*
- * Per cgroup active and inactive list, similar to the
- * per zone LRU lists.
- */
- struct mem_cgroup_lru_info info;
- int last_scanned_node;
-#if MAX_NUMNODES > 1
- nodemask_t scan_nodes;
- atomic_t numainfo_events;
- atomic_t numainfo_updating;
-#endif
/*
* Should the accounting and control be hierarchical, per subtree?
*/
/* Index in the kmem_cache->memcg_params->memcg_caches array */
int kmemcg_id;
#endif
+
+ int last_scanned_node;
+#if MAX_NUMNODES > 1
+ nodemask_t scan_nodes;
+ atomic_t numainfo_events;
+ atomic_t numainfo_updating;
+#endif
+ /*
+ * Per cgroup active and inactive list, similar to the
+ * per zone LRU lists.
+ *
+ * WARNING: This has to be the last element of the struct. Don't
+ * add new fields after this point.
+ */
+ struct mem_cgroup_lru_info info;
};
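
The WARNING in the moved comment is load-bearing: a zero-length array
occupies no space, so any member declared after info would land at the
same offset as the runtime-allocated nodeinfo slots and silently alias
them. A hypothetical demonstration (struct bad is made up for this
sketch):

    #include <stdio.h>
    #include <stddef.h>

    struct bad {
        int head;
        int tail[0];    /* zero-length array: occupies no space */
        int oops;       /* accepted by GCC, but aliases tail[0] */
    };

    int main(void)
    {
        /* Both members report the same offset, so writing tail[0]
         * in an over-allocated object clobbers oops. */
        printf("tail at %zu, oops at %zu\n",
               offsetof(struct bad, tail), offsetof(struct bad, oops));
        return 0;
    }
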
+static size_t memcg_size(void)
+{
+ return sizeof(struct mem_cgroup) +
+ nr_node_ids * sizeof(struct mem_cgroup_per_node);
+}
+
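
memcg_size() exists because sizeof(struct mem_cgroup) now stops short of
the nodeinfo tail; the helper adds the tail back, scaled by nr_node_ids,
the number of possible nodes on the running system, rather than by
MAX_NUMNODES. Keeping the arithmetic in one helper also stops the alloc
and free paths below from drifting apart. A quick check of the sizeof
behavior this relies on (struct demo is hypothetical):

    #include <stdio.h>

    struct demo {
        long header;
        char tail[0];
    };

    int main(void)
    {
        /* Prints 8 on a typical LP64 target: the zero-length tail is
         * invisible to sizeof, so a sizing helper must add it back. */
        printf("sizeof(struct demo) = %zu\n", sizeof(struct demo));
        return 0;
    }
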
/* internal only representation about the status of kmem accounting. */
enum {
KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
+ VM_BUG_ON((unsigned)nid >= nr_node_ids);
return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
}
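
The new VM_BUG_ON makes the shrunken array's bound explicit: the old
MAX_NUMNODES-sized array turned a stray nid into an obvious NULL
dereference, but the trimmed allocation would read past the end of the
object instead. The (unsigned) cast folds the negative case into the same
comparison, because a negative nid converts to a huge unsigned value. A
userspace analogue (container and lookup are the hypothetical names from
the sketch above):

    #include <assert.h>

    struct node_info { long stats[4]; };

    struct container {
        int nr_nodes;
        struct node_info nodeinfo[0];
    };

    static struct node_info *lookup(struct container *c, int nid)
    {
        /* One unsigned compare rejects both nid < 0 and
         * nid >= nr_nodes, mirroring the VM_BUG_ON above. */
        assert((unsigned)nid < (unsigned)c->nr_nodes);
        return &c->nodeinfo[nid];
    }
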
static struct mem_cgroup *mem_cgroup_alloc(void)
{
struct mem_cgroup *memcg;
- int size = sizeof(struct mem_cgroup);
+ size_t size = memcg_size();
- /* Can be very big if MAX_NUMNODES is very big */
+ /* Can be very big if nr_node_ids is very big */
if (size < PAGE_SIZE)
memcg = kzalloc(size, GFP_KERNEL);
else
static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
- int size = sizeof(struct mem_cgroup);
+ size_t size = memcg_size();
mem_cgroup_remove_from_trees(memcg);
free_css_id(&mem_cgroup_subsys, &memcg->css);