mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	memcgroup: use vmalloc for mem_cgroup allocation
On ia64, this kmalloc() requires order-4 pages. However, the allocation does not need to be physically contiguous. For a big mem_cgroup, vmalloc is better; for small ones, kmalloc is used. [akpm@linux-foundation.org: simplification] Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Pavel Emelyanov <xemul@openvz.org> Cc: Li Zefan <lizf@cn.fujitsu.com> Cc: Balbir Singh <balbir@in.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									4a56d02e34
								
							
						
					
					
						commit
						3332794878
					
				
					 1 changed file with 29 additions and 6 deletions
				
			
		| 
						 | 
				
			
			@ -31,6 +31,7 @@
 | 
			
		|||
#include <linux/spinlock.h>
 | 
			
		||||
#include <linux/fs.h>
 | 
			
		||||
#include <linux/seq_file.h>
 | 
			
		||||
#include <linux/vmalloc.h>
 | 
			
		||||
 | 
			
		||||
#include <asm/uaccess.h>
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -983,6 +984,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 | 
			
		|||
	kfree(mem->info.nodeinfo[node]);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static struct mem_cgroup *mem_cgroup_alloc(void)
 | 
			
		||||
{
 | 
			
		||||
	struct mem_cgroup *mem;
 | 
			
		||||
 | 
			
		||||
	if (sizeof(*mem) < PAGE_SIZE)
 | 
			
		||||
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
 | 
			
		||||
	else
 | 
			
		||||
		mem = vmalloc(sizeof(*mem));
 | 
			
		||||
 | 
			
		||||
	if (mem)
 | 
			
		||||
		memset(mem, 0, sizeof(*mem));
 | 
			
		||||
	return mem;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void mem_cgroup_free(struct mem_cgroup *mem)
 | 
			
		||||
{
 | 
			
		||||
	if (sizeof(*mem) < PAGE_SIZE)
 | 
			
		||||
		kfree(mem);
 | 
			
		||||
	else
 | 
			
		||||
		vfree(mem);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
static struct cgroup_subsys_state *
 | 
			
		||||
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 | 
			
		||||
{
 | 
			
		||||
| 
						 | 
				
			
			@ -993,12 +1017,11 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 | 
			
		|||
		mem = &init_mem_cgroup;
 | 
			
		||||
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
 | 
			
		||||
	} else {
 | 
			
		||||
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
 | 
			
		||||
		mem = mem_cgroup_alloc();
 | 
			
		||||
		if (!mem)
 | 
			
		||||
			return ERR_PTR(-ENOMEM);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (mem == NULL)
 | 
			
		||||
		return ERR_PTR(-ENOMEM);
 | 
			
		||||
 | 
			
		||||
	res_counter_init(&mem->res);
 | 
			
		||||
 | 
			
		||||
	memset(&mem->info, 0, sizeof(mem->info));
 | 
			
		||||
| 
						 | 
				
			
			@ -1012,7 +1035,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 | 
			
		|||
	for_each_node_state(node, N_POSSIBLE)
 | 
			
		||||
		free_mem_cgroup_per_zone_info(mem, node);
 | 
			
		||||
	if (cont->parent != NULL)
 | 
			
		||||
		kfree(mem);
 | 
			
		||||
		mem_cgroup_free(mem);
 | 
			
		||||
	return ERR_PTR(-ENOMEM);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -1032,7 +1055,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 | 
			
		|||
	for_each_node_state(node, N_POSSIBLE)
 | 
			
		||||
		free_mem_cgroup_per_zone_info(mem, node);
 | 
			
		||||
 | 
			
		||||
	kfree(mem_cgroup_from_cont(cont));
 | 
			
		||||
	mem_cgroup_free(mem_cgroup_from_cont(cont));
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int mem_cgroup_populate(struct cgroup_subsys *ss,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue