mirror of https://github.com/torvalds/linux.git (synced 2025-11-03 18:20:25 +02:00)

arch: use memblock_alloc() instead of memblock_alloc_from(size, align, 0)
The last parameter of memblock_alloc_from() is the lower limit for the
memory allocation.  When it is 0, the call is equivalent to
memblock_alloc().

Link: http://lkml.kernel.org/r/1548057848-15136-13-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Paul Burton <paul.burton@mips.com>	# MIPS part
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Guo Ren <ren_guo@c-sky.com>		[c-sky]
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Juergen Gross <jgross@suse.com>	[Xen]
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent c366ea89fa
commit 9415673e3e

8 changed files with 13 additions and 14 deletions
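Every hunk below applies the same substitution: the third argument of memblock_alloc_from() is the minimum physical address the allocation may come from, and when it is 0 the request is identical to what memblock_alloc() issues, so the shorter call can be used. A minimal sketch of the two forms side by side, assuming only the signatures visible in this diff; example_early_alloc() and the 16 MiB lower bound are illustrative and not part of the patch:

	#include <linux/init.h>
	#include <linux/cache.h>	/* SMP_CACHE_BYTES */
	#include <linux/memblock.h>

	static void * __init example_early_alloc(unsigned long size)
	{
		/*
		 * Lower limit 0: allocate from anywhere memblock manages,
		 * which is exactly what memblock_alloc() requests ...
		 */
		void *p = memblock_alloc_from(size, SMP_CACHE_BYTES, 0);

		/* ... so the two-argument form is equivalent. */
		p = memblock_alloc(size, SMP_CACHE_BYTES);

		/*
		 * The three-argument form is only needed when the lower
		 * bound is non-zero, e.g. to keep the buffer above the
		 * first 16 MiB (an illustrative constraint):
		 */
		p = memblock_alloc_from(size, SMP_CACHE_BYTES, 16UL * 1024 * 1024);

		return p;
	}

Dropping the redundant third argument keeps these call sites shorter and leaves memblock_alloc_from() only where a non-zero lower limit is actually required.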
@@ -331,7 +331,7 @@ cia_prepare_tbia_workaround(int window)
 	long i;
 
 	/* Use minimal 1K map. */
-	ppte = memblock_alloc_from(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+	ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768);
 	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
 
 	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)

@@ -87,13 +87,13 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 		printk("%s: couldn't allocate arena ptes from node %d\n"
 		       "    falling back to system-wide allocation\n",
 		       __func__, nid);
-		arena->ptes = memblock_alloc_from(mem_size, align, 0);
+		arena->ptes = memblock_alloc(mem_size, align);
 	}
 
 #else /* CONFIG_DISCONTIGMEM */
 
 	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
-	arena->ptes = memblock_alloc_from(mem_size, align, 0);
+	arena->ptes = memblock_alloc(mem_size, align);
 
 #endif /* CONFIG_DISCONTIGMEM */
 

@@ -293,7 +293,7 @@ move_initrd(unsigned long mem_limit)
 	unsigned long size;
 
 	size = initrd_end - initrd_start;
-	start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0);
+	start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
 	if (!start || __pa(start) + size > mem_limit) {
 		initrd_start = initrd_end = 0;
 		return NULL;

@@ -1835,8 +1835,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 /* Caller prevents this from being called after init */
 static void * __ref mca_bootmem(void)
 {
-	return memblock_alloc_from(sizeof(struct ia64_mca_cpu),
-				   KERNEL_STACK_SIZE, 0);
+	return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
 }
 
 /* Do per-CPU MCA-related initialization.  */

@@ -2293,7 +2293,7 @@ void __init trap_init(void)
 		phys_addr_t ebase_pa;
 
 		ebase = (unsigned long)
-			memblock_alloc_from(size, 1 << fls(size), 0);
+			memblock_alloc(size, 1 << fls(size));
 
 		/*
 		 * Try to ensure ebase resides in KSeg0 if possible.

@@ -32,7 +32,7 @@ void * __init prom_early_alloc(unsigned long size)
 {
 	void *ret;
 
-	ret = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
+	ret = memblock_alloc(size, SMP_CACHE_BYTES);
 	if (ret != NULL)
 		memset(ret, 0, size);
 

@@ -264,7 +264,7 @@ void __init mem_init(void)
 	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
 	i += 1;
 	sparc_valid_addr_bitmap = (unsigned long *)
-		memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL);
+		memblock_alloc(i << 2, SMP_CACHE_BYTES);
 
 	if (sparc_valid_addr_bitmap == NULL) {
 		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");

@@ -303,13 +303,13 @@ static void __init srmmu_nocache_init(void)
 
 	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
 
-	srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size,
-						 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+	srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
+					    SRMMU_NOCACHE_ALIGN_MAX);
 	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
 	srmmu_nocache_bitmap =
-		memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
-				    SMP_CACHE_BYTES, 0UL);
+		memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+			       SMP_CACHE_BYTES);
 	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
 	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);

@@ -467,7 +467,7 @@ static void __init sparc_context_init(int numctx)
 	unsigned long size;
 
 	size = numctx * sizeof(struct ctx_list);
-	ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
+	ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
 
 	for (ctx = 0; ctx < numctx; ctx++) {
 		struct ctx_list *clist;