mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 02:30:34 +02:00)

Merge branch 'akpm' (patches from Andrew)

Merge misc updates from Andrew Morton:
 - a few misc things
 - the rest of MM
 - remove flex_arrays, replace with new simple radix-tree implementation
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (38 commits)
  Drop flex_arrays
  sctp: convert to genradix
  proc: commit to genradix
  generic radix trees
  selinux: convert to kvmalloc
  md: convert to kvmalloc
  openvswitch: convert to kvmalloc
  of: fix kmemleak crash caused by imbalance in early memory reservation
  mm: memblock: update comments and kernel-doc
  memblock: split checks whether a region should be skipped to a helper function
  memblock: remove memblock_{set,clear}_region_flags
  memblock: drop memblock_alloc_*_nopanic() variants
  memblock: memblock_alloc_try_nid: don't panic
  treewide: add checks for the return value of memblock_alloc*()
  swiotlb: add checks for the return value of memblock_alloc*()
  init/main: add checks for the return value of memblock_alloc*()
  mm/percpu: add checks for the return value of memblock_alloc*()
  sparc: add checks for the return value of memblock_alloc*()
  ia64: add checks for the return value of memblock_alloc*()
  arch: don't memset(0) memory returned by memblock_alloc()
  ...
			
			
This commit is contained in:
commit a667cb7a94

159 changed files with 1654 additions and 1710 deletions
Documentation/core-api/flexible-arrays.rst (deleted)
@@ -1,130 +0,0 @@
-===================================
-Using flexible arrays in the kernel
-===================================
-
-Large contiguous memory allocations can be unreliable in the Linux kernel.
-Kernel programmers will sometimes respond to this problem by allocating
-pages with :c:func:`vmalloc()`.  This solution is not ideal, though.  On 32-bit
-systems, memory from vmalloc() must be mapped into a relatively small address
-space; it's easy to run out.  On SMP systems, the page table changes required
-by vmalloc() allocations can require expensive cross-processor interrupts on
-all CPUs.  And, on all systems, use of space in the vmalloc() range increases
-pressure on the translation lookaside buffer (TLB), reducing the performance
-of the system.
-
-In many cases, the need for memory from vmalloc() can be eliminated by piecing
-together an array from smaller parts; the flexible array library exists to make
-this task easier.
-
-A flexible array holds an arbitrary (within limits) number of fixed-sized
-objects, accessed via an integer index.  Sparse arrays are handled
-reasonably well.  Only single-page allocations are made, so memory
-allocation failures should be relatively rare.  The down sides are that the
-arrays cannot be indexed directly, individual object size cannot exceed the
-system page size, and putting data into a flexible array requires a copy
-operation.  It's also worth noting that flexible arrays do no internal
-locking at all; if concurrent access to an array is possible, then the
-caller must arrange for appropriate mutual exclusion.
-
-The creation of a flexible array is done with :c:func:`flex_array_alloc()`::
-
-    #include <linux/flex_array.h>
-
-    struct flex_array *flex_array_alloc(int element_size,
-					unsigned int total,
-					gfp_t flags);
-
-The individual object size is provided by ``element_size``, while total is the
-maximum number of objects which can be stored in the array.  The flags
-argument is passed directly to the internal memory allocation calls.  With
-the current code, using flags to ask for high memory is likely to lead to
-notably unpleasant side effects.
-
-It is also possible to define flexible arrays at compile time with::
-
-    DEFINE_FLEX_ARRAY(name, element_size, total);
-
-This macro will result in a definition of an array with the given name; the
-element size and total will be checked for validity at compile time.
-
-Storing data into a flexible array is accomplished with a call to
-:c:func:`flex_array_put()`::
-
-    int flex_array_put(struct flex_array *array, unsigned int element_nr,
-    		       void *src, gfp_t flags);
-
-This call will copy the data from src into the array, in the position
-indicated by ``element_nr`` (which must be less than the maximum specified when
-the array was created).  If any memory allocations must be performed, flags
-will be used.  The return value is zero on success, a negative error code
-otherwise.
-
-There might possibly be a need to store data into a flexible array while
-running in some sort of atomic context; in this situation, sleeping in the
-memory allocator would be a bad thing.  That can be avoided by using
-``GFP_ATOMIC`` for the flags value, but, often, there is a better way.  The
-trick is to ensure that any needed memory allocations are done before
-entering atomic context, using :c:func:`flex_array_prealloc()`::
-
-    int flex_array_prealloc(struct flex_array *array, unsigned int start,
-			    unsigned int nr_elements, gfp_t flags);
-
-This function will ensure that memory for the elements indexed in the range
-defined by ``start`` and ``nr_elements`` has been allocated.  Thereafter, a
-``flex_array_put()`` call on an element in that range is guaranteed not to
-block.
-
-Getting data back out of the array is done with :c:func:`flex_array_get()`::
-
-    void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
-
-The return value is a pointer to the data element, or NULL if that
-particular element has never been allocated.
-
-Note that it is possible to get back a valid pointer for an element which
-has never been stored in the array.  Memory for array elements is allocated
-one page at a time; a single allocation could provide memory for several
-adjacent elements.  Flexible array elements are normally initialized to the
-value ``FLEX_ARRAY_FREE`` (defined as 0x6c in <linux/poison.h>), so errors
-involving that number probably result from use of unstored array entries.
-Note that, if array elements are allocated with ``__GFP_ZERO``, they will be
-initialized to zero and this poisoning will not happen.
-
-Individual elements in the array can be cleared with
-:c:func:`flex_array_clear()`::
-
-    int flex_array_clear(struct flex_array *array, unsigned int element_nr);
-
-This function will set the given element to ``FLEX_ARRAY_FREE`` and return
-zero.  If storage for the indicated element is not allocated for the array,
-``flex_array_clear()`` will return ``-EINVAL`` instead.  Note that clearing an
-element does not release the storage associated with it; to reduce the
-allocated size of an array, call :c:func:`flex_array_shrink()`::
-
-    int flex_array_shrink(struct flex_array *array);
-
-The return value will be the number of pages of memory actually freed.
-This function works by scanning the array for pages containing nothing but
-``FLEX_ARRAY_FREE`` bytes, so (1) it can be expensive, and (2) it will not work
-if the array's pages are allocated with ``__GFP_ZERO``.
-
-It is possible to remove all elements of an array with a call to
-:c:func:`flex_array_free_parts()`::
-
-    void flex_array_free_parts(struct flex_array *array);
-
-This call frees all elements, but leaves the array itself in place.
-Freeing the entire array is done with :c:func:`flex_array_free()`::
-
-    void flex_array_free(struct flex_array *array);
-
-As of this writing, there are no users of flexible arrays in the mainline
-kernel.  The functions described here are also not exported to modules;
-that will probably be fixed when somebody comes up with a need for it.
-
-
-Flexible array functions
-------------------------
-
-.. kernel-doc:: include/linux/flex_array.h
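For reference, here is what typical use of the API being removed looked like, pieced together from the documentation above. This is a minimal sketch rather than code from the tree; struct my_record and the records_* helper names are illustrative:

    #include <linux/flex_array.h>

    struct my_record { unsigned long key; };    /* illustrative element type */

    static struct flex_array *records;

    static int records_init(unsigned int nr)
    {
            records = flex_array_alloc(sizeof(struct my_record), nr, GFP_KERNEL);
            if (!records)
                    return -ENOMEM;
            /* Allocate backing pages up front so later puts cannot block. */
            return flex_array_prealloc(records, 0, nr, GFP_KERNEL);
    }

    static int records_store(unsigned int i, struct my_record *r)
    {
            return flex_array_put(records, i, r, GFP_ATOMIC);  /* copies *r in */
    }

    static struct my_record *records_lookup(unsigned int i)
    {
            return flex_array_get(records, i);  /* may point at unstored data */
    }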
							
								
								
									
Documentation/core-api/generic-radix-tree.rst (new file, 12 lines)
@@ -0,0 +1,12 @@
+=================================
+Generic radix trees/sparse arrays
+=================================
+
+.. kernel-doc:: include/linux/generic-radix-tree.h
+   :doc: Generic radix trees/sparse arrays
+
+generic radix tree functions
+----------------------------
+
+.. kernel-doc:: include/linux/generic-radix-tree.h
+   :functions:
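A minimal usage sketch of the replacement API, based on the kernel-doc in include/linux/generic-radix-tree.h; the element type is illustrative and the exact accessor names should be checked against that header:

    #include <linux/generic-radix-tree.h>

    struct my_record { u64 key; };      /* illustrative element type */

    static GENRADIX(struct my_record) records;

    static int records_demo(void)
    {
            struct my_record *r;

            genradix_init(&records);

            /* Allocates intermediate nodes on demand; can fail. */
            r = genradix_ptr_alloc(&records, 42, GFP_KERNEL);
            if (!r)
                    return -ENOMEM;
            r->key = 123;

            /* Plain lookup: returns NULL for never-allocated slots. */
            r = genradix_ptr(&records, 42);

            genradix_free(&records);
            return 0;
    }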
Documentation/core-api/index.rst
@@ -28,6 +28,7 @@ Core utilities
    errseq
    printk-formats
    circular-buffers
+   generic-radix-tree
    memory-allocation
    mm-api
    gfp_mask-from-fs-io
Documentation/flexible-arrays.txt (deleted)
@@ -1,123 +0,0 @@
-===================================
-Using flexible arrays in the kernel
-===================================
-
-:Updated: Last updated for 2.6.32
-:Author: Jonathan Corbet <corbet@lwn.net>
-
-Large contiguous memory allocations can be unreliable in the Linux kernel.
-Kernel programmers will sometimes respond to this problem by allocating
-pages with vmalloc().  This solution is not ideal, though.  On 32-bit systems,
-memory from vmalloc() must be mapped into a relatively small address space;
-it's easy to run out.  On SMP systems, the page table changes required by
-vmalloc() allocations can require expensive cross-processor interrupts on
-all CPUs.  And, on all systems, use of space in the vmalloc() range
-increases pressure on the translation lookaside buffer (TLB), reducing the
-performance of the system.
-
-In many cases, the need for memory from vmalloc() can be eliminated by
-piecing together an array from smaller parts; the flexible array library
-exists to make this task easier.
-
-A flexible array holds an arbitrary (within limits) number of fixed-sized
-objects, accessed via an integer index.  Sparse arrays are handled
-reasonably well.  Only single-page allocations are made, so memory
-allocation failures should be relatively rare.  The down sides are that the
-arrays cannot be indexed directly, individual object size cannot exceed the
-system page size, and putting data into a flexible array requires a copy
-operation.  It's also worth noting that flexible arrays do no internal
-locking at all; if concurrent access to an array is possible, then the
-caller must arrange for appropriate mutual exclusion.
-
-The creation of a flexible array is done with::
-
-    #include <linux/flex_array.h>
-
-    struct flex_array *flex_array_alloc(int element_size,
-					unsigned int total,
-					gfp_t flags);
-
-The individual object size is provided by element_size, while total is the
-maximum number of objects which can be stored in the array.  The flags
-argument is passed directly to the internal memory allocation calls.  With
-the current code, using flags to ask for high memory is likely to lead to
-notably unpleasant side effects.
-
-It is also possible to define flexible arrays at compile time with::
-
-    DEFINE_FLEX_ARRAY(name, element_size, total);
-
-This macro will result in a definition of an array with the given name; the
-element size and total will be checked for validity at compile time.
-
-Storing data into a flexible array is accomplished with a call to::
-
-    int flex_array_put(struct flex_array *array, unsigned int element_nr,
-    		       void *src, gfp_t flags);
-
-This call will copy the data from src into the array, in the position
-indicated by element_nr (which must be less than the maximum specified when
-the array was created).  If any memory allocations must be performed, flags
-will be used.  The return value is zero on success, a negative error code
-otherwise.
-
-There might possibly be a need to store data into a flexible array while
-running in some sort of atomic context; in this situation, sleeping in the
-memory allocator would be a bad thing.  That can be avoided by using
-GFP_ATOMIC for the flags value, but, often, there is a better way.  The
-trick is to ensure that any needed memory allocations are done before
-entering atomic context, using::
-
-    int flex_array_prealloc(struct flex_array *array, unsigned int start,
-			    unsigned int nr_elements, gfp_t flags);
-
-This function will ensure that memory for the elements indexed in the range
-defined by start and nr_elements has been allocated.  Thereafter, a
-flex_array_put() call on an element in that range is guaranteed not to
-block.
-
-Getting data back out of the array is done with::
-
-    void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
-
-The return value is a pointer to the data element, or NULL if that
-particular element has never been allocated.
-
-Note that it is possible to get back a valid pointer for an element which
-has never been stored in the array.  Memory for array elements is allocated
-one page at a time; a single allocation could provide memory for several
-adjacent elements.  Flexible array elements are normally initialized to the
-value FLEX_ARRAY_FREE (defined as 0x6c in <linux/poison.h>), so errors
-involving that number probably result from use of unstored array entries.
-Note that, if array elements are allocated with __GFP_ZERO, they will be
-initialized to zero and this poisoning will not happen.
-
-Individual elements in the array can be cleared with::
-
-    int flex_array_clear(struct flex_array *array, unsigned int element_nr);
-
-This function will set the given element to FLEX_ARRAY_FREE and return
-zero.  If storage for the indicated element is not allocated for the array,
-flex_array_clear() will return -EINVAL instead.  Note that clearing an
-element does not release the storage associated with it; to reduce the
-allocated size of an array, call::
-
-    int flex_array_shrink(struct flex_array *array);
-
-The return value will be the number of pages of memory actually freed.
-This function works by scanning the array for pages containing nothing but
-FLEX_ARRAY_FREE bytes, so (1) it can be expensive, and (2) it will not work
-if the array's pages are allocated with __GFP_ZERO.
-
-It is possible to remove all elements of an array with a call to::
-
-    void flex_array_free_parts(struct flex_array *array);
-
-This call frees all elements, but leaves the array itself in place.
-Freeing the entire array is done with::
-
-    void flex_array_free(struct flex_array *array);
-
-As of this writing, there are no users of flexible arrays in the mainline
-kernel.  The functions described here are also not exported to modules;
-that will probably be fixed when somebody comes up with a need for it.
@@ -331,7 +331,10 @@ cia_prepare_tbia_workaround(int window)
 	long i;
 
 	/* Use minimal 1K map. */
-	ppte = memblock_alloc_from(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+	ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768);
+	if (!ppte)
+		panic("%s: Failed to allocate %u bytes align=0x%x\n",
+		      __func__, CIA_BROKEN_TBIA_SIZE, 32768);
 	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
 
 	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
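Nearly all of the architecture hunks below repeat the pattern of the hunk above: per the "memblock_alloc_try_nid: don't panic" and "drop memblock_alloc_*_nopanic() variants" patches in this series, memblock no longer panics internally, so each boot-time caller now checks the return value and panics with a uniform message. The converted shape, as a sketch (the function name and size are illustrative):

    #include <linux/memblock.h>

    static void __init setup_example(size_t size)
    {
            void *p = memblock_alloc(size, SMP_CACHE_BYTES);

            if (!p)
                    panic("%s: Failed to allocate %zu bytes\n", __func__, size);
            /* ... use p ... */
    }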
@@ -83,6 +83,9 @@ mk_resource_name(int pe, int port, char *str)
 
 	sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
 	name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES);
+	if (!name)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      strlen(tmp) + 1);
 	strcpy(name, tmp);
 
 	return name;

@@ -118,6 +121,9 @@ alloc_io7(unsigned int pe)
 	}
 
 	io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES);
+	if (!io7)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*io7));
 	io7->pe = pe;
 	raw_spin_lock_init(&io7->irq_lock);
 

@@ -34,6 +34,9 @@ alloc_pci_controller(void)
 	struct pci_controller *hose;
 
 	hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
+	if (!hose)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*hose));
 
 	*hose_tail = hose;
 	hose_tail = &hose->next;

@@ -44,7 +47,13 @@ alloc_pci_controller(void)
 struct resource * __init
 alloc_resource(void)
 {
-	return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+	void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(struct resource));
+
+	return ptr;
 }
 
 SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,

@@ -54,7 +63,7 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
 
 	/* from hose or from bus.devfn */
 	if (which & IOBASE_FROM_HOSE) {
-		for (hose = hose_head; hose; hose = hose->next) 
+		for (hose = hose_head; hose; hose = hose->next)
 			if (hose->index == bus)
 				break;
 		if (!hose)

@@ -393,6 +393,9 @@ alloc_pci_controller(void)
 	struct pci_controller *hose;
 
 	hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
+	if (!hose)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*hose));
 
 	*hose_tail = hose;
 	hose_tail = &hose->next;

@@ -403,7 +406,13 @@ alloc_pci_controller(void)
 struct resource * __init
 alloc_resource(void)
 {
-	return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+	void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(struct resource));
+
+	return ptr;
 }
 
 

@@ -80,6 +80,9 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 		       "    falling back to system-wide allocation\n",
 		       __func__, nid);
 		arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
+		if (!arena)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      sizeof(*arena));
 	}
 
 	arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);

@@ -87,13 +90,22 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 		printk("%s: couldn't allocate arena ptes from node %d\n"
 		       "    falling back to system-wide allocation\n",
 		       __func__, nid);
-		arena->ptes = memblock_alloc_from(mem_size, align, 0);
+		arena->ptes = memblock_alloc(mem_size, align);
+		if (!arena->ptes)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, mem_size, align);
 	}
 
 #else /* CONFIG_DISCONTIGMEM */
 
 	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
-	arena->ptes = memblock_alloc_from(mem_size, align, 0);
+	if (!arena)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*arena));
+	arena->ptes = memblock_alloc(mem_size, align);
+	if (!arena->ptes)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, mem_size, align);
 
 #endif /* CONFIG_DISCONTIGMEM */
 

@@ -293,7 +293,7 @@ move_initrd(unsigned long mem_limit)
 	unsigned long size;
 
 	size = initrd_end - initrd_start;
-	start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0);
+	start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
 	if (!start || __pa(start) + size > mem_limit) {
 		initrd_start = initrd_end = 0;
 		return NULL;
@@ -181,8 +181,7 @@ static void init_unwind_hdr(struct unwind_table *table,
  */
 static void *__init unw_hdr_alloc_early(unsigned long sz)
 {
-	return memblock_alloc_from_nopanic(sz, sizeof(unsigned int),
-					   MAX_DMA_ADDRESS);
+	return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
 }
 
 static void *unw_hdr_alloc(unsigned long sz)

@@ -124,6 +124,10 @@ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 	pmd_k = pmd_offset(pud_k, kvaddr);
 
 	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+	if (!pte_k)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
+
 	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
 	return pte_k;
 }
@@ -867,6 +867,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 		boot_alias_start = phys_to_idmap(start);
 		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
 			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+			if (!res)
+				panic("%s: Failed to allocate %zu bytes\n",
+				      __func__, sizeof(*res));
 			res->name = "System RAM (boot alias)";
 			res->start = boot_alias_start;
 			res->end = phys_to_idmap(end);

@@ -875,6 +878,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 		}
 
 		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      sizeof(*res));
 		res->name  = "System RAM";
 		res->start = start;
 		res->end = end;

@@ -205,7 +205,11 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
 
 	BUG_ON(!arm_memblock_steal_permitted);
 
-	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+	phys = memblock_phys_alloc(size, align);
+	if (!phys)
+		panic("Failed to steal %pa bytes at %pS\n",
+		      &size, (void *)_RET_IP_);
+
 	memblock_free(phys, size);
 	memblock_remove(phys, size);
 

@@ -721,7 +721,13 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 static void __init *early_alloc(unsigned long sz)
 {
-	return memblock_alloc(sz, sz);
+	void *ptr = memblock_alloc(sz, sz);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, sz, sz);
+
+	return ptr;
 }
 
 static void *__init late_alloc(unsigned long sz)

@@ -994,6 +1000,9 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		return;
 
 	svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
+	if (!svm)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);

@@ -1016,6 +1025,9 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 	struct static_vm *svm;
 
 	svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
+	if (!svm)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, sizeof(*svm), __alignof__(*svm));
 
 	vm = &svm->vm;
 	vm->addr = (void *)addr;
@@ -208,6 +208,7 @@ static void __init request_standard_resources(void)
 	struct memblock_region *region;
 	struct resource *res;
 	unsigned long i = 0;
+	size_t res_size;
 
 	kernel_code.start   = __pa_symbol(_text);
 	kernel_code.end     = __pa_symbol(__init_begin - 1);

@@ -215,9 +216,10 @@ static void __init request_standard_resources(void)
 	kernel_data.end     = __pa_symbol(_end - 1);
 
 	num_standard_resources = memblock.memory.cnt;
-	standard_resources = memblock_alloc_low(num_standard_resources *
-					        sizeof(*standard_resources),
-					        SMP_CACHE_BYTES);
+	res_size = num_standard_resources * sizeof(*standard_resources);
+	standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
+	if (!standard_resources)
+		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
 	for_each_memblock(memory, region) {
 		res = &standard_resources[i++];

@@ -40,6 +40,11 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
 					      __pa(MAX_DMA_ADDRESS),
 					      MEMBLOCK_ALLOC_KASAN, node);
+	if (!p)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE, node,
+		      __pa(MAX_DMA_ADDRESS));
+
 	return __pa(p);
 }
 

@@ -48,6 +53,11 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
 	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
 						__pa(MAX_DMA_ADDRESS),
 						MEMBLOCK_ALLOC_KASAN, node);
+	if (!p)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE, node,
+		      __pa(MAX_DMA_ADDRESS));
+
 	return __pa(p);
 }
 

@@ -103,6 +103,8 @@ static phys_addr_t __init early_pgtable_alloc(void)
 	void *ptr;
 
 	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!phys)
+		panic("Failed to allocate page table page\n");
 
 	/*
 	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE

@@ -237,6 +237,10 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 		pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
 
 	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+	if (!nd_pa)
+		panic("Cannot allocate %zu bytes for node %d data\n",
+		      nd_size, nid);
+
 	nd = __va(nd_pa);
 
 	/* report and initialize */
@@ -138,6 +138,10 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
 
 	dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
 				    sizeof(long));
+	if (!dma_bitmap)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, BITS_TO_LONGS(dma_pages) * sizeof(long),
+		      sizeof(long));
 }
 
 static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,

@@ -40,7 +40,9 @@ void __init paging_init(void)
 
 	empty_zero_page      = (unsigned long) memblock_alloc(PAGE_SIZE,
 							      PAGE_SIZE);
-	memset((void *)empty_zero_page, 0, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up user data space

@@ -141,6 +141,11 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+					if (!pte)
+						panic("%s: Failed to allocate %lu bytes align=%lx\n",
+						      __func__, PAGE_SIZE,
+						      PAGE_SIZE);
+
 					set_pmd(pmd, __pmd(__pa(pte)));
 					BUG_ON(pte != pte_offset_kernel(pmd, 0));
 				}

@@ -68,7 +68,9 @@ void __init paging_init(void)
 	 * to a couple of allocated pages.
 	 */
 	empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-	memset((void *)empty_zero_page, 0, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers (user data space).
@@ -359,11 +359,6 @@ typedef struct ia64_state_log_s
 
 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 
-#define IA64_LOG_ALLOCATE(it, size) \
-	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
-		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \
-	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
-		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);}
 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
 #define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
 #define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)

@@ -378,6 +373,19 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 #define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
 #define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
 
+static inline void ia64_log_allocate(int it, u64 size)
+{
+	ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
+		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
+		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+
+	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
+		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
+		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+}
+
 /*
  * ia64_log_init
  *	Reset the OS ia64 log buffer

@@ -399,9 +407,7 @@ ia64_log_init(int sal_info_type)
 		return;
 
 	// set up OS data structures to hold error info
-	IA64_LOG_ALLOCATE(sal_info_type, max_size);
-	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
-	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
+	ia64_log_allocate(sal_info_type, max_size);
 }
 
 /*

@@ -1835,8 +1841,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 /* Caller prevents this from being called after init */
 static void * __ref mca_bootmem(void)
 {
-	return memblock_alloc_from(sizeof(struct ia64_mca_cpu),
-				   KERNEL_STACK_SIZE, 0);
+	return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
 }
 
 /* Do per-CPU MCA-related initialization.  */

@@ -84,9 +84,13 @@ void *per_cpu_init(void)
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(),
-				       PERCPU_PAGE_SIZE,
+	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
+
+	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
 				       __pa(MAX_DMA_ADDRESS));
+	if (!cpu_data)
+		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
 
 /**

@@ -454,6 +454,10 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
 				     __pa(MAX_DMA_ADDRESS),
 				     MEMBLOCK_ALLOC_ACCESSIBLE,
 				     bestnode);
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
+		      __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
+		      __pa(MAX_DMA_ADDRESS));
 
 	return ptr;
 }

@@ -444,23 +444,45 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
 
 	for (address = start_page; address < end_page; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
-		if (pgd_none(*pgd))
-			pgd_populate(&init_mm, pgd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+		if (pgd_none(*pgd)) {
+			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pud)
+				goto err_alloc;
+			pgd_populate(&init_mm, pgd, pud);
+		}
 		pud = pud_offset(pgd, address);
 
-		if (pud_none(*pud))
-			pud_populate(&init_mm, pud, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+		if (pud_none(*pud)) {
+			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pmd)
+				goto err_alloc;
+			pud_populate(&init_mm, pud, pmd);
+		}
 		pmd = pmd_offset(pud, address);
 
-		if (pmd_none(*pmd))
-			pmd_populate_kernel(&init_mm, pmd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+		if (pmd_none(*pmd)) {
+			pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pte)
+				goto err_alloc;
+			pmd_populate_kernel(&init_mm, pmd, pte);
+		}
 		pte = pte_offset_kernel(pmd, address);
 
-		if (pte_none(*pte))
-			set_pte(pte, pfn_pte(__pa(memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)) >> PAGE_SHIFT,
+		if (pte_none(*pte)) {
+			void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
+							 node);
+			if (!page)
+				goto err_alloc;
+			set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
 					     PAGE_KERNEL));
+		}
 	}
 	return 0;
+
+err_alloc:
+	panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
+	      __func__, PAGE_SIZE, PAGE_SIZE, node);
+	return -ENOMEM;
 }
 
 struct memmap_init_callback_data {

@@ -61,8 +61,14 @@ mmu_context_init (void)
 {
 	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
 					 SMP_CACHE_BYTES);
+	if (!ia64_ctx.bitmap)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      (ia64_ctx.max_ctx + 1) >> 3);
 	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
 					   SMP_CACHE_BYTES);
+	if (!ia64_ctx.flushmap)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      (ia64_ctx.max_ctx + 1) >> 3);
 }
 
 /*

@@ -394,6 +394,9 @@ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
 	hubdev_info = (struct hubdev_info *)memblock_alloc_node(size,
 								SMP_CACHE_BYTES,
 								node);
+	if (!hubdev_info)
+		panic("%s: Failed to allocate %d bytes align=0x%x nid=%d\n",
+		      __func__, size, SMP_CACHE_BYTES, node);
 
 	npda->pdinfo = (void *)hubdev_info;
 }

@@ -513,6 +513,10 @@ static void __init sn_init_pdas(char **cmdline_p)
 		nodepdaindr[cnode] =
 		    memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES,
 					cnode);
+		if (!nodepdaindr[cnode])
+			panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+			      __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+			      cnode);
 		memset(nodepdaindr[cnode]->phys_cpuid, -1,
 		    sizeof(nodepdaindr[cnode]->phys_cpuid));
 		spin_lock_init(&nodepdaindr[cnode]->ptc_lock);

@@ -521,9 +525,15 @@ static void __init sn_init_pdas(char **cmdline_p)
 	/*
 	 * Allocate & initialize nodepda for TIOs.  For now, put them on node 0.
 	 */
-	for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
+	for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
 		nodepdaindr[cnode] =
 		    memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0);
+		if (!nodepdaindr[cnode])
+			panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+			      __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+			      cnode);
+	}
+
 
 	/*
 	 * Now copy the array of nodepda pointers to each nodepda.
@@ -97,6 +97,10 @@ void __init atari_stram_reserve_pages(void *start_mem)
 		pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n");
 		stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size,
 								       PAGE_SIZE);
+		if (!stram_pool.start)
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, pool_size, PAGE_SIZE);
+
 		stram_pool.end = stram_pool.start + pool_size - 1;
 		request_resource(&iomem_resource, &stram_pool);
 		stram_virt_offset = 0;

@@ -94,6 +94,9 @@ void __init paging_init(void)
 	high_memory = (void *) end_mem;
 
 	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers (user data space).

@@ -44,7 +44,9 @@ void __init paging_init(void)
 	int i;
 
 	empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-	memset((void *) empty_zero_page, 0, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	pg_dir = swapper_pg_dir;
 	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

@@ -52,6 +54,9 @@ void __init paging_init(void)
 	size = num_pages * sizeof(pte_t);
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
 	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
+	if (!next_pgtable)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, size, PAGE_SIZE);
 
 	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

@@ -55,6 +55,9 @@ static pte_t * __init kernel_page_table(void)
 	pte_t *ptablep;
 
 	ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+	if (!ptablep)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	clear_page(ptablep);
 	__flush_page_to_ram(ptablep);

@@ -96,6 +99,9 @@ static pmd_t * __init kernel_ptr_table(void)
 	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
 		last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
 							   PAGE_SIZE);
+		if (!last_pgtable)
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
 
 		clear_page(last_pgtable);
 		__flush_page_to_ram(last_pgtable);

@@ -278,6 +284,9 @@ void __init paging_init(void)
 	 * to a couple of allocated pages
 	 */
 	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers

@@ -46,6 +46,9 @@ void __init paging_init(void)
 	unsigned long size;
 
 	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	address = PAGE_OFFSET;
 	pg_dir = swapper_pg_dir;

@@ -56,6 +59,9 @@ void __init paging_init(void)
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
 
 	next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
+	if (!next_pgtable)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, size, PAGE_SIZE);
 	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 
 	/* Map whole memory from PAGE_OFFSET (0x0E000000) */

@@ -269,6 +269,9 @@ void __init dvma_init(void)
 
 	iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
 				   SMP_CACHE_BYTES);
+	if (!iommu_use)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
 
 	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
 
@@ -374,12 +374,14 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
 
-	if (mem_init_done)
+	if (mem_init_done) {
 		p = kzalloc(size, mask);
-	else {
+	} else {
 		p = memblock_alloc(size, SMP_CACHE_BYTES);
-		if (p)
-			memset(p, 0, size);
+		if (!p)
+			panic("%s: Failed to allocate %zu bytes\n",
+			      __func__, size);
 	}
 
 	return p;
 }

@@ -245,6 +245,9 @@ void __init plat_swiotlb_setup(void)
 	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
 
 	octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
+	if (!octeon_swiotlb)
+		panic("%s: Failed to allocate %zu bytes align=%lx\n",
+		      __func__, swiotlbsize, PAGE_SIZE);
 
 	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
 		panic("Cannot allocate SWIOTLB buffer");

@@ -919,6 +919,9 @@ static void __init resource_init(void)
 			end = HIGHMEM_START - 1;
 
 		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      sizeof(struct resource));
 
 		res->start = start;
 		res->end = end;

@@ -2293,7 +2293,10 @@ void __init trap_init(void)
 		phys_addr_t ebase_pa;
 
 		ebase = (unsigned long)
-			memblock_alloc_from(size, 1 << fls(size), 0);
+			memblock_alloc(size, 1 << fls(size));
+		if (!ebase)
+			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+			      __func__, size, 1 << fls(size));
 
 		/*
 		 * Try to ensure ebase resides in KSeg0 if possible.

@@ -252,6 +252,11 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
 									   PAGE_SIZE);
+					if (!pte)
+						panic("%s: Failed to allocate %lu bytes align=%lx\n",
+						      __func__, PAGE_SIZE,
+						      PAGE_SIZE);
+
 					set_pmd(pmd, __pmd((unsigned long)pte));
 					BUG_ON(pte != pte_offset_kernel(pmd, 0));
 				}
			@ -79,6 +79,9 @@ static void __init map_ram(void)
 | 
			
		|||
 | 
			
		||||
		/* Alloc one page for holding PTE's... */
 | 
			
		||||
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 | 
			
		||||
		if (!pte)
 | 
			
		||||
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 | 
			
		||||
			      __func__, PAGE_SIZE, PAGE_SIZE);
 | 
			
		||||
		set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
 | 
			
		||||
 | 
			
		||||
		/* Fill the newly allocated page with PTE'S */
 | 
			
		||||
| 
						 | 
				
			
			@ -111,6 +114,9 @@ static void __init fixedrange_init(void)
 | 
			
		|||
	pud = pud_offset(pgd, vaddr);
 | 
			
		||||
	pmd = pmd_offset(pud, vaddr);
 | 
			
		||||
	fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 | 
			
		||||
	if (!fixmap_pmd_p)
 | 
			
		||||
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 | 
			
		||||
		      __func__, PAGE_SIZE, PAGE_SIZE);
 | 
			
		||||
	set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));
 | 
			
		||||
 | 
			
		||||
#ifdef CONFIG_HIGHMEM
 | 
			
		||||
| 
						 | 
				
			
			@ -123,6 +129,9 @@ static void __init fixedrange_init(void)
 | 
			
		|||
	pud = pud_offset(pgd, vaddr);
 | 
			
		||||
	pmd = pmd_offset(pud, vaddr);
 | 
			
		||||
	pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 | 
			
		||||
	if (!pte)
 | 
			
		||||
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 | 
			
		||||
		      __func__, PAGE_SIZE, PAGE_SIZE);
 | 
			
		||||
	set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
 | 
			
		||||
	pkmap_page_table = pte;
 | 
			
		||||
#endif /* CONFIG_HIGHMEM */
 | 
			
		||||
| 
						 | 
				
			
			@ -148,6 +157,9 @@ void __init paging_init(void)
 | 
			
		|||
 | 
			
		||||
	/* allocate space for empty_zero_page */
 | 
			
		||||
	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 | 
			
		||||
	if (!zero_page)
 | 
			
		||||
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 | 
			
		||||
		      __func__, PAGE_SIZE, PAGE_SIZE);
 | 
			
		||||
	zone_sizes_init();
 | 
			
		||||
 | 
			
		||||
	empty_zero_page = virt_to_page(zero_page);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
@@ -105,7 +105,10 @@ static void __init map_ram(void)
 			}

 			/* Alloc one page for holding PTE's... */
-			pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
+			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
+			if (!pte)
+				panic("%s: Failed to allocate page for PTEs\n",
+				      __func__);
 			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

 			/* Fill the newly allocated page with PTE'S */
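Two conversions overlap in the hunk above: `__va(memblock_phys_alloc(...))` becomes `memblock_alloc_raw(...)`, which hands back a virtual address directly, and the `_raw` variant also skips zeroing, which is fine for a page that is fully initialized immediately afterwards. A toy model of the phys/virt split — the arena, to_virt() and both allocators below are illustrative stand-ins, not the kernel's implementations:

	#include <stdint.h>
	#include <stdio.h>

	/* Toy model: "physical" addresses are offsets into a static arena
	 * and to_virt() plays the role of __va(). */
	static unsigned char arena[1 << 16];
	static size_t arena_next = 64;	/* keep 0 free so it can mean failure */

	static uintptr_t phys_alloc(size_t size)  /* memblock_phys_alloc() stand-in */
	{
		uintptr_t pa;

		if (arena_next + size > sizeof(arena))
			return 0;
		pa = arena_next;
		arena_next += size;
		return pa;
	}

	static void *to_virt(uintptr_t pa)	/* __va() stand-in */
	{
		return arena + pa;
	}

	static void *alloc_raw(size_t size)	/* memblock_alloc_raw() stand-in */
	{
		uintptr_t pa = phys_alloc(size);

		return pa ? to_virt(pa) : NULL;	/* virtual pointer, not zeroed */
	}

	int main(void)
	{
		void *pte = alloc_raw(4096);	/* no __va() at the call site */

		if (!pte) {
			fprintf(stderr, "Failed to allocate page for PTEs\n");
			return 1;
		}
		printf("PTE page at %p\n", pte);
		return 0;
	}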
@@ -122,10 +122,14 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	pte_t *pte;

-	if (likely(mem_init_done))
+	if (likely(mem_init_done)) {
 		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
-	else
+	} else {
 		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!pte)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+	}

 	return pte;
 }
@@ -810,7 +810,6 @@ static int __init process_cpufeatures_node(unsigned long node,
 	int len;

 	f = &dt_cpu_features[i];
-	memset(f, 0, sizeof(struct dt_cpu_feature));

 	f->node = node;

@@ -1005,7 +1004,12 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 	/* Count and allocate space for cpu features */
 	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
 						&nr_dt_cpu_features);
-	dt_cpu_features = __va(memblock_phys_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE));
+	dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
+	if (!dt_cpu_features)
+		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
+		      __func__,
+		      sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
+		      PAGE_SIZE);

 	cpufeatures_setup_start(isa);
@@ -196,7 +196,11 @@ void __init allocate_paca_ptrs(void)
 	paca_nr_cpu_ids = nr_cpu_ids;

 	paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
-	paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, SMP_CACHE_BYTES));
+	paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
+	if (!paca_ptrs)
+		panic("Failed to allocate %d bytes for paca pointers\n",
+		      paca_ptrs_size);

 	memset(paca_ptrs, 0x88, paca_ptrs_size);
 }
@@ -205,6 +205,9 @@ pci_create_OF_bus_map(void)

 	of_prop = memblock_alloc(sizeof(struct property) + 256,
 				 SMP_CACHE_BYTES);
+	if (!of_prop)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(struct property) + 256);
 	dn = of_find_node_by_path("/");
 	if (dn) {
 		memset(of_prop, -1, sizeof(struct property) + 256);
@@ -126,7 +126,10 @@ static void __init move_device_tree(void)
 	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
 	    !memblock_is_memory(start + size - 1) ||
 	    overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
-		p = __va(memblock_phys_alloc(size, PAGE_SIZE));
+		p = memblock_alloc_raw(size, PAGE_SIZE);
+		if (!p)
+			panic("Failed to allocate %lu bytes to move device tree\n",
+			      size);
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = p;
 		DBG("Moved device tree to 0x%px\n", p);
@@ -1187,7 +1187,11 @@ void __init rtas_initialize(void)
 		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
 	}
 #endif
-	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
+	rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE,
+						 0, rtas_region);
+	if (!rtas_rmo_buf)
+		panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
+		      PAGE_SIZE, &rtas_region);

 #ifdef CONFIG_RTAS_ERROR_LOGGING
 	rtas_last_error_token = rtas_token("rtas-last-error");
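memblock_alloc_base() panicked on failure and only took an upper bound; its replacement, memblock_phys_alloc_range(), takes explicit [start, end) bounds and returns 0 when nothing fits, which is why the hunk above adds its own panic. A rough userspace model of a bounded, top-down physical allocator (page-granular and illustrative only; the real memblock scans region lists, not a bitmap):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGES 256
	static unsigned char used[PAGES];	/* toy page-granular "memblock" */

	/* Toy memblock_phys_alloc_range(): scan [start, end) top-down for a
	 * free run of npages; return 0 on failure, as the kernel helper does. */
	static uint64_t phys_alloc_range(size_t npages, uint64_t start, uint64_t end)
	{
		size_t lo = start / PAGE_SIZE, hi = end / PAGE_SIZE;

		if (hi > PAGES)
			hi = PAGES;
		if (hi <= lo || hi - lo < npages)
			return 0;
		for (size_t base = hi - npages + 1; base-- > lo; ) {
			size_t i;

			for (i = 0; i < npages && !used[base + i]; i++)
				;
			if (i == npages) {
				while (i--)
					used[base + i] = 1;
				return base * PAGE_SIZE;
			}
		}
		return 0;
	}

	int main(void)
	{
		/* One page below 2 GiB, echoing the bounded allocations above. */
		uint64_t pa = phys_alloc_range(1, 0, 1UL << 31);

		if (!pa) {
			fprintf(stderr, "Failed to allocate below 2 GiB\n");
			return 1;
		}
		printf("got page at 0x%llx\n", (unsigned long long)pa);
		return 0;
	}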
@@ -461,6 +461,9 @@ void __init smp_setup_cpu_maps(void)

 	cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
 					__alignof__(u32));
+	if (!cpu_to_phys_id)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));

 	for_each_node_by_type(dn, "cpu") {
 		const __be32 *intserv;
@@ -905,6 +905,10 @@ static void __ref init_fallback_flush(void)
 	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
 						l1d_size, MEMBLOCK_LOW_LIMIT,
 						limit, NUMA_NO_NODE);
+	if (!l1d_flush_fallback_area)
+		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
+		      __func__, l1d_size * 2, l1d_size, &limit);
+

 	for_each_possible_cpu(cpu) {
 		struct paca_struct *paca = paca_ptrs[cpu];
@@ -15,6 +15,9 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 		p = kzalloc(size, mask);
 	else {
 		p = memblock_alloc(size, SMP_CACHE_BYTES);
+		if (!p)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      size);
 	}
 	return p;
 }
@@ -882,8 +882,12 @@ static void __init htab_initialize(void)
 		}
 #endif /* CONFIG_PPC_CELL */

-		table = memblock_alloc_base(htab_size_bytes, htab_size_bytes,
-					    limit);
+		table = memblock_phys_alloc_range(htab_size_bytes,
+						  htab_size_bytes,
+						  0, limit);
+		if (!table)
+			panic("ERROR: Failed to allocate %pa bytes below %pa\n",
+			      &htab_size_bytes, &limit);

 		DBG("Hash table allocated at %lx, size: %lx\n", table,
 		    htab_size_bytes);
@@ -911,6 +915,9 @@ static void __init htab_initialize(void)
 		linear_map_hash_slots = memblock_alloc_try_nid(
 				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
 				ppc64_rma_size,	NUMA_NO_NODE);
+		if (!linear_map_hash_slots)
+			panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
+			      __func__, linear_map_hash_count, &ppc64_rma_size);
 	}
 #endif /* CONFIG_DEBUG_PAGEALLOC */
@@ -461,10 +461,19 @@ void __init mmu_context_init(void)
 	 * Allocate the maps used by context management
 	 */
 	context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+	if (!context_map)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      CTX_MAP_SIZE);
 	context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
 				    SMP_CACHE_BYTES);
+	if (!context_mm)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(void *) * (LAST_CONTEXT + 1));
 #ifdef CONFIG_SMP
 	stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+	if (!stale_map[boot_cpuid])
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      CTX_MAP_SIZE);

 	cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
 				  "powerpc/mmu/ctx:prepare",
@@ -788,6 +788,10 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 	int tnid;

 	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+	if (!nd_pa)
+		panic("Cannot allocate %zu bytes for node %d data\n",
+		      nd_size, nid);
+
 	nd = __va(nd_pa);

 	/* report and initialize */
@@ -57,8 +57,16 @@ void vmemmap_remove_mapping(unsigned long start,

 static __ref void *early_alloc_pgtable(unsigned long size)
 {
-	return memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
-				      __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
+	void *ptr;
+
+	ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
+				     __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
+		      __func__, size, size, __pa(MAX_DMA_ADDRESS));
+
+	return ptr;
 }

 /*
@@ -197,6 +197,9 @@ void __init mmu_partition_table_init(void)
 	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
 	/* Initialize the Partition Table with no entries */
 	partition_tb = memblock_alloc(patb_size, patb_size);
+	if (!partition_tb)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, patb_size, patb_size);

 	/*
 	 * update partition table control register,
@@ -53,13 +53,20 @@ static __ref void *early_alloc_pgtable(unsigned long size, int nid,
 {
 	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
 	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
+	void *ptr;

 	if (region_start)
 		min_addr = region_start;
 	if (region_end)
 		max_addr = region_end;

-	return memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
+	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
+		      __func__, size, size, nid, &min_addr, &max_addr);
+
+	return ptr;
 }

 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
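Both variants of early_alloc_pgtable() above now capture the allocator's return value so a single panic can report full context (size, node, bounds) instead of the allocator printing a generic message. A compact sketch of the wrapper idiom — alloc_try_nid() below is a stand-in, not the kernel function:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Stand-in allocator: zeroed memory or NULL; nid is ignored here. */
	static void *alloc_try_nid(size_t size, size_t align, int nid)
	{
		void *p = NULL;

		(void)nid;
		if (posix_memalign(&p, align, size))
			return NULL;
		return memset(p, 0, size);
	}

	/* The wrapper owns the failure policy, so call sites stay one-liners. */
	static void *early_alloc_pgtable(size_t size, int nid)
	{
		void *ptr = alloc_try_nid(size, size, nid);

		if (!ptr) {
			fprintf(stderr, "%s: Failed to allocate %zu bytes nid=%d\n",
				__func__, size, nid);
			exit(1);
		}
		return ptr;
	}

	int main(void)
	{
		free(early_alloc_pgtable(4096, 0));
		return 0;
	}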
@@ -340,6 +340,9 @@ void __init MMU_init_hw(void)
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
 	Hash = memblock_alloc(Hash_size, Hash_size);
+	if (!Hash)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, Hash_size, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

 	Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
@@ -211,6 +211,9 @@ static int __init iob_init(struct device_node *dn)
 	iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21,
 					MEMBLOCK_LOW_LIMIT, 0x80000000,
 					NUMA_NO_NODE);
+	if (!iob_l2_base)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%x\n",
+		      __func__, 1UL << 21, 1UL << 21, 0x80000000);

 	pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
@@ -519,6 +519,9 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
 		return -EINVAL;
 	}
 	nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES);
+	if (!nvram_image)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      NVRAM_SIZE);
 	nvram_data = ioremap(addr, NVRAM_SIZE*2);
 	nvram_naddrs = 1; /* Make sure we get the correct case */
@@ -171,6 +171,9 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
 	 * Allocate a buffer to hold the MC recoverable ranges.
 	 */
 	mc_recoverable_range = memblock_alloc(size, __alignof__(u64));
+	if (!mc_recoverable_range)
+		panic("%s: Failed to allocate %u bytes align=0x%lx\n",
+		      __func__, size, __alignof__(u64));

 	for (i = 0; i < mc_recoverable_range_len; i++) {
 		mc_recoverable_range[i].start_addr =
@@ -3657,6 +3657,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

 	phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
+	if (!phb)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*phb));

 	/* Allocate PCI controller */
 	phb->hose = hose = pcibios_alloc_controller(np);
@@ -3703,6 +3706,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 		phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;

 	phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
+	if (!phb->diag_data)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      phb->diag_data_size);

 	/* Parse 32-bit and IO ranges (if any) */
 	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
@@ -3762,6 +3768,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	pemap_off = size;
 	size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
 	aux = memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!aux)
+		panic("%s: Failed to allocate %lu bytes\n", __func__, size);
 	phb->ioda.pe_alloc = aux;
 	phb->ioda.m64_segmap = aux + m64map_off;
 	phb->ioda.m32_segmap = aux + m32map_off;
@@ -127,6 +127,9 @@ static void __init prealloc(struct ps3_prealloc *p)
 		return;

 	p->address = memblock_alloc(p->size, p->align);
+	if (!p->address)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, p->size, p->align);

 	printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
 	       p->address);
@@ -265,6 +265,9 @@ static void allocate_dart(void)
 	 * prefetching into invalid pages and corrupting data
 	 */
 	tmp = memblock_phys_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
+	if (!tmp)
+		panic("DART: table allocation failed\n");

 	dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
 					 DARTMAP_RPNMASK);
@@ -129,6 +129,9 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
 		bmp->bitmap = kzalloc(size, GFP_KERNEL);
 	else {
 		bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES);
+		if (!bmp->bitmap)
+			panic("%s: Failed to allocate %u bytes\n", __func__,
+			      size);
 		/* the bitmap won't be freed from memblock allocator */
 		kmemleak_not_leak(bmp->bitmap);
 	}
@@ -61,6 +61,9 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
 	struct save_area *sa;

 	sa = (void *) memblock_phys_alloc(sizeof(*sa), 8);
+	if (!sa)
+		panic("Failed to allocate save area\n");

 	if (is_boot_cpu)
 		list_add(&sa->list, &dump_save_areas);
 	else
@@ -378,6 +378,10 @@ static void __init setup_lowcore_dat_off(void)
 	 */
 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
 	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
+	if (!lc)
+		panic("%s: Failed to allocate %zu bytes align=%zx\n",
+		      __func__, sizeof(*lc), sizeof(*lc));
+
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
 	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
@@ -419,6 +423,9 @@ static void __init setup_lowcore_dat_off(void)
 	 * all CPUs in cast *one* of them does a PSW restart.
 	 */
 	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+	if (!restart_stack)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, THREAD_SIZE, THREAD_SIZE);
 	restart_stack += STACK_INIT_OFFSET;

 	/*
@@ -495,6 +502,9 @@ static void __init setup_resources(void)

 	for_each_memblock(memory, reg) {
 		res = memblock_alloc(sizeof(*res), 8);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+			      __func__, sizeof(*res), 8);
 		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

 		res->name = "System RAM";
@@ -509,6 +519,9 @@ static void __init setup_resources(void)
 				continue;
 			if (std_res->end > res->end) {
 				sub_res = memblock_alloc(sizeof(*sub_res), 8);
+				if (!sub_res)
+					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+					      __func__, sizeof(*sub_res), 8);
 				*sub_res = *std_res;
 				sub_res->end = res->end;
 				std_res->start = res->end + 1;
@@ -966,6 +979,9 @@ static void __init setup_randomness(void)

 	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
 							    PAGE_SIZE);
+	if (!vmms)
+		panic("Failed to allocate memory for sysinfo structure\n");
+
 	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
 		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
 	memblock_free((unsigned long) vmms, PAGE_SIZE);
@@ -656,7 +656,11 @@ void __init smp_save_dump_cpus(void)
 		/* No previous system present, normal boot. */
 		return;
 	/* Allocate a page as dumping area for the store status sigps */
-	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
+	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
+	if (!page)
+		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
+		      PAGE_SIZE, 1UL << 31);
+
 	/* Set multi-threading state to the previous system. */
 	pcpu_set_smt(sclp.mtid_prev);
 	boot_cpu_addr = stap();
@@ -766,6 +770,9 @@ void __init smp_detect_cpus(void)

 	/* Get CPU information */
 	info = memblock_alloc(sizeof(*info), 8);
+	if (!info)
+		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+		      __func__, sizeof(*info), 8);
 	smp_get_core_info(info, 1);
 	/* Find boot CPU type */
 	if (sclp.has_core_type) {
@@ -520,6 +520,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
 		mask->next = memblock_alloc(sizeof(*mask->next), 8);
+		if (!mask->next)
+			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+			      __func__, sizeof(*mask->next), 8);
 		mask = mask->next;
 	}
 }
@@ -538,6 +541,9 @@ void __init topology_init_early(void)
 	if (!MACHINE_HAS_TOPOLOGY)
 		goto out;
 	tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!tl_info)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 	info = tl_info;
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
@@ -313,6 +313,9 @@ static void __ref create_core_to_node_map(void)
 	int i;

 	emu_cores = memblock_alloc(sizeof(*emu_cores), 8);
+	if (!emu_cores)
+		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+		      __func__, sizeof(*emu_cores), 8);
 	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
 		emu_cores->to_node_id[i] = NODE_ID_FREE;
 }
@@ -92,8 +92,12 @@ static void __init numa_setup_memory(void)
 	} while (cur_base < end_of_dram);

 	/* Allocate and fill out node_data */
-	for (nid = 0; nid < MAX_NUMNODES; nid++)
+	for (nid = 0; nid < MAX_NUMNODES; nid++) {
 		NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
+		if (!NODE_DATA(nid))
+			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+			      __func__, sizeof(pg_data_t), 8);
+	}

 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
@@ -556,7 +556,10 @@ static void __init ap325rxa_mv_mem_reserve(void)
 	phys_addr_t phys;
 	phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;

-	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+	phys = memblock_phys_alloc(size, PAGE_SIZE);
+	if (!phys)
+		panic("Failed to allocate CEU memory\n");

 	memblock_free(phys, size);
 	memblock_remove(phys, size);
@@ -1476,12 +1476,18 @@ static void __init ecovec_mv_mem_reserve(void)
 	phys_addr_t phys;
 	phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;

-	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+	phys = memblock_phys_alloc(size, PAGE_SIZE);
+	if (!phys)
+		panic("Failed to allocate CEU0 memory\n");
+
 	memblock_free(phys, size);
 	memblock_remove(phys, size);
 	ceu0_dma_membase = phys;

-	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+	phys = memblock_phys_alloc(size, PAGE_SIZE);
+	if (!phys)
+		panic("Failed to allocate CEU1 memory\n");
+
 	memblock_free(phys, size);
 	memblock_remove(phys, size);
 	ceu1_dma_membase = phys;
@@ -630,7 +630,10 @@ static void __init kfr2r09_mv_mem_reserve(void)
 	phys_addr_t phys;
 	phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;

-	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+	phys = memblock_phys_alloc(size, PAGE_SIZE);
+	if (!phys)
+		panic("Failed to allocate CEU memory\n");

 	memblock_free(phys, size);
 	memblock_remove(phys, size);
@@ -630,7 +630,10 @@ static void __init migor_mv_mem_reserve(void)
 	phys_addr_t phys;
 	phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;

-	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+	phys = memblock_phys_alloc(size, PAGE_SIZE);
+	if (!phys)
+		panic("Failed to allocate CEU memory\n");

 	memblock_free(phys, size);
 	memblock_remove(phys, size);
@@ -963,12 +963,18 @@ static void __init ms7724se_mv_mem_reserve(void)
 	phys_addr_t phys;
 	phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;

-	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+	phys = memblock_phys_alloc(size, PAGE_SIZE);
+	if (!phys)
+		panic("Failed to allocate CEU0 memory\n");
+
 	memblock_free(phys, size);
 	memblock_remove(phys, size);
 	ceu0_dma_membase = phys;

-	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+	phys = memblock_phys_alloc(size, PAGE_SIZE);
+	if (!phys)
+		panic("Failed to allocate CEU1 memory\n");
+
 	memblock_free(phys, size);
 	memblock_remove(phys, size);
 	ceu1_dma_membase = phys;
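The five sh board hunks share an idiom worth spelling out: the CEU frame buffers are carved out of the kernel's memory view. The code allocates a range so nothing else can claim it, then immediately memblock_free()s and memblock_remove()s it, leaving a hole the camera driver later remaps itself. Only the allocation step changed here — memblock_phys_alloc() can now fail, hence the new panics. A toy trace of the idiom (all three helpers below are illustrative stand-ins that only print what the real calls would do):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t mock_phys_alloc(uint64_t size, uint64_t align)
	{
		(void)align;
		printf("alloc  0x%llx bytes\n", (unsigned long long)size);
		return 0x40000000;	/* pretend the range starts here */
	}

	static void mock_free(uint64_t pa, uint64_t size)
	{
		printf("free   0x%llx+0x%llx (reusable again)\n",
		       (unsigned long long)pa, (unsigned long long)size);
	}

	static void mock_remove(uint64_t pa, uint64_t size)
	{
		printf("remove 0x%llx+0x%llx (kernel forgets this RAM)\n",
		       (unsigned long long)pa, (unsigned long long)size);
	}

	int main(void)
	{
		uint64_t size = 1 << 22, phys = mock_phys_alloc(size, 4096);

		if (!phys) {
			fprintf(stderr, "Failed to allocate CEU memory\n");
			return 1;
		}
		mock_free(phys, size);		/* give the pages back ... */
		mock_remove(phys, size);	/* ... then hide them entirely */
		printf("ceu_dma_membase = 0x%llx\n", (unsigned long long)phys);
		return 0;
	}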
@@ -168,7 +168,8 @@ void __init reserve_crashkernel(void)
 	crash_size = PAGE_ALIGN(resource_size(&crashk_res));
 	if (!crashk_res.start) {
 		unsigned long max = memblock_end_of_DRAM() - memory_limit;
-		crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
+		crashk_res.start = memblock_phys_alloc_range(crash_size,
+							     PAGE_SIZE, 0, max);
 		if (!crashk_res.start) {
 			pr_err("crashkernel allocation failed\n");
 			goto disable;
@@ -128,6 +128,9 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
 		pmd_t *pmd;

 		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!pmd)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
 		pud_populate(&init_mm, pud, pmd);
 		BUG_ON(pmd != pmd_offset(pud, 0));
 	}
@@ -141,6 +144,9 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 		pte_t *pte;

 		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		if (!pte)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
 		pmd_populate_kernel(&init_mm, pmd, pte);
 		BUG_ON(pte != pte_offset_kernel(pmd, 0));
 	}
@@ -196,7 +202,7 @@ void __init allocate_pgdat(unsigned int nid)
 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

 #ifdef CONFIG_NEED_MULTIPLE_NODES
-	NODE_DATA(nid) = memblock_alloc_try_nid_nopanic(
+	NODE_DATA(nid) = memblock_alloc_try_nid(
 				sizeof(struct pglist_data),
 				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
 				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
@@ -43,6 +43,10 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 	/* Node-local pgdat */
 	NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
 					     SMP_CACHE_BYTES, nid);
+	if (!NODE_DATA(nid))
+		panic("%s: Failed to allocate %zu bytes align=0x%x nid=%d\n",
+		      __func__, sizeof(struct pglist_data), SMP_CACHE_BYTES,
+		      nid);

 	NODE_DATA(nid)->node_start_pfn = start_pfn;
 	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
@@ -32,9 +32,9 @@ void * __init prom_early_alloc(unsigned long size)
 {
 	void *ret;

-	ret = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
-	if (ret != NULL)
-		memset(ret, 0, size);
+	ret = memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ret)
+		panic("%s: Failed to allocate %lu bytes\n", __func__, size);

 	prom_early_allocated += size;
@@ -624,8 +624,14 @@ void __init alloc_irqstack_bootmem(void)

 		softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
 						       THREAD_SIZE, node);
+		if (!softirq_stack[i])
+			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+			      __func__, THREAD_SIZE, THREAD_SIZE, node);
 		hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
 						       THREAD_SIZE, node);
+		if (!hardirq_stack[i])
+			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+			      __func__, THREAD_SIZE, THREAD_SIZE, node);
 	}
 }
@@ -1628,6 +1628,8 @@ static void __init pcpu_populate_pte(unsigned long addr)
 		pud_t *new;

 		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
 		pgd_populate(&init_mm, pgd, new);
 	}

@@ -1636,6 +1638,8 @@ static void __init pcpu_populate_pte(unsigned long addr)
 		pmd_t *new;

 		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
 		pud_populate(&init_mm, pud, new);
 	}

@@ -1644,8 +1648,16 @@ static void __init pcpu_populate_pte(unsigned long addr)
 		pte_t *new;

 		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		if (!new)
+			goto err_alloc;
 		pmd_populate_kernel(&init_mm, pmd, new);
 	}
+
+	return;
+
+err_alloc:
+	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 }

 void __init setup_per_cpu_areas(void)
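Rather than duplicate the panic message at each of the three page-table levels, the sparc hunks funnel every failure to a single err_alloc label. A compact userspace rendering of the same control flow (mock_alloc() is illustrative; in the kernel path the panic makes cleanup unnecessary):

	#include <stdio.h>
	#include <stdlib.h>

	static void *mock_alloc(void)	/* memblock_alloc_from() stand-in */
	{
		return malloc(4096);
	}

	static void populate(void)
	{
		void *pud, *pmd, *pte;

		pud = mock_alloc();
		if (!pud)
			goto err_alloc;
		pmd = mock_alloc();
		if (!pmd)
			goto err_alloc;
		pte = mock_alloc();
		if (!pte)
			goto err_alloc;
		free(pte);
		free(pmd);
		free(pud);
		return;

	err_alloc:
		/* One label, one message; no cleanup since we terminate anyway. */
		fprintf(stderr, "%s: Failed to allocate page table page\n", __func__);
		exit(1);
	}

	int main(void)
	{
		populate();
		return 0;
	}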
@@ -264,7 +264,7 @@ void __init mem_init(void)
 	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
 	i += 1;
 	sparc_valid_addr_bitmap = (unsigned long *)
-		memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL);
+		memblock_alloc(i << 2, SMP_CACHE_BYTES);

 	if (sparc_valid_addr_bitmap == NULL) {
 		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
@@ -1809,6 +1809,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,

 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
+			if (!new)
+				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pgd_populate(&init_mm, pgd, new);
 		}
@@ -1822,6 +1824,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			}
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
+			if (!new)
+				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pud_populate(&init_mm, pud, new);
 		}
@@ -1836,6 +1840,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 			}
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
+			if (!new)
+				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pmd_populate_kernel(&init_mm, pmd, new);
 		}
@@ -1855,6 +1861,11 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 	}

 	return alloc_bytes;
+
+err_alloc:
+	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+	return -ENOMEM;
 }

 static void __init flush_all_kernel_tsbs(void)
@@ -303,13 +303,19 @@ static void __init srmmu_nocache_init(void)

 	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

-	srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size,
-						 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+	srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
+					    SRMMU_NOCACHE_ALIGN_MAX);
+	if (!srmmu_nocache_pool)
+		panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+		      __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
 	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

 	srmmu_nocache_bitmap =
-		memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
-				    SMP_CACHE_BYTES, 0UL);
+		memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+			       SMP_CACHE_BYTES);
+	if (!srmmu_nocache_bitmap)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      BITS_TO_LONGS(bitmap_bits) * sizeof(long));
 	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

 	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -467,7 +473,9 @@ static void __init sparc_context_init(int numctx)
 	unsigned long size;

 	size = numctx * sizeof(struct ctx_list);
-	ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
+	ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ctx_list_pool)
+		panic("%s: Failed to allocate %lu bytes\n", __func__, size);

 	for (ctx = 0; ctx < numctx; ctx++) {
 		struct ctx_list *clist;
@@ -649,6 +649,9 @@ static int __init eth_setup(char *str)
 	}

 	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
+	if (!new)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*new));

 	INIT_LIST_HEAD(&new->list);
 	new->index = n;
@@ -1576,6 +1576,9 @@ static int __init vector_setup(char *str)
 		return 1;
 	}
 	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
+	if (!new)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*new));
 	INIT_LIST_HEAD(&new->list);
 	new->unit = n;
 	new->arguments = str;
@@ -37,6 +37,8 @@ int __init read_initrd(void)
 	}

 	area = memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!area)
+		panic("%s: Failed to allocate %llu bytes\n", __func__, size);

 	if (load_initrd(initrd, area, size) == -1)
 		return 0;
@@ -66,6 +66,10 @@ static void __init one_page_table_init(pmd_t *pmd)
 	if (pmd_none(*pmd)) {
 		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
 							  PAGE_SIZE);
+		if (!pte)
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+
 		set_pmd(pmd, __pmd(_KERNPG_TABLE +
 					   (unsigned long) __pa(pte)));
 		if (pte != pte_offset_kernel(pmd, 0))
@@ -77,6 +81,10 @@ static void __init one_md_table_init(pud_t *pud)
 {
 #ifdef CONFIG_3_LEVEL_PGTABLES
 	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+	if (!pmd_table)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
+
 	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
 	if (pmd_table != pmd_offset(pud, 0))
 		BUG();
@@ -126,6 +134,10 @@ static void __init fixaddr_user_init( void)

 	fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
 	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
+	if (!v)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, size, PAGE_SIZE);
+
 	memcpy((void *) v , (void *) FIXADDR_USER_START, size);
 	p = __pa(v);
 	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
@@ -146,6 +158,10 @@ void __init paging_init(void)

 	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
 							       PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
+
 	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
 		zones_size[i] = 0;
@@ -207,6 +207,10 @@ request_standard_resources(struct meminfo *mi)
 			continue;

 		res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes align=%x\n",
+			      __func__, sizeof(*res), SMP_CACHE_BYTES);
+
 		res->name  = "System RAM";
 		res->start = mi->bank[i].start;
 		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
@@ -145,8 +145,13 @@ static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
 		unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
-		pte_t *pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t),
-					    PTRS_PER_PTE * sizeof(pte_t));
+		size_t size = PTRS_PER_PTE * sizeof(pte_t);
+		pte_t *pte = memblock_alloc(size, size);
+
+		if (!pte)
+			panic("%s: Failed to allocate %zu bytes align=%zx\n",
+			      __func__, size, size);
+
 		__pmd_populate(pmd, __pa(pte) | prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
@@ -349,6 +354,9 @@ static void __init devicemaps_init(void)
 	 * Allocate the vector page early.
 	 */
 	vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!vectors)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);

 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -426,6 +434,9 @@ void __init paging_init(void)

 	/* allocate the zero page. */
 	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);

 	bootmem_init();
@@ -935,6 +935,9 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
 #define HPET_RESOURCE_NAME_SIZE 9
 	hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
 				  SMP_CACHE_BYTES);
+	if (!hpet_res)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);

 	hpet_res->name = (void *)&hpet_res[1];
 	hpet_res->flags = IORESOURCE_MEM;
@@ -2581,6 +2581,8 @@ static struct resource * __init ioapic_setup_resources(void)
 	n *= nr_ioapics;

 	mem = memblock_alloc(n, SMP_CACHE_BYTES);
+	if (!mem)
+		panic("%s: Failed to allocate %lu bytes\n", __func__, n);
 	res = (void *)mem;

 	mem += sizeof(struct resource) * nr_ioapics;
@@ -2625,6 +2627,9 @@ void __init io_apic_init_mappings(void)
 #endif
 			ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
 								    PAGE_SIZE);
+			if (!ioapic_phys)
+				panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+				      __func__, PAGE_SIZE, PAGE_SIZE);
 			ioapic_phys = __pa(ioapic_phys);
 		}
 		set_fixmap_nocache(idx, ioapic_phys);
@@ -776,7 +776,7 @@ u64 __init e820__memblock_alloc_reserved(u64 size, u64 align)
 {
 	u64 addr;

-	addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+	addr = memblock_phys_alloc(size, align);
 	if (addr) {
 		e820__range_update_kexec(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
 		pr_info("update e820_table_kexec for e820__memblock_alloc_reserved()\n");
@@ -1097,6 +1097,9 @@ void __init e820__reserve_resources(void)

 	res = memblock_alloc(sizeof(*res) * e820_table->nr_entries,
 			     SMP_CACHE_BYTES);
+	if (!res)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      sizeof(*res) * e820_table->nr_entries);
 	e820_res = res;

 	for (i = 0; i < e820_table->nr_entries; i++) {
@@ -106,22 +106,22 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 	void *ptr;

 	if (!node_online(node) || !NODE_DATA(node)) {
-		ptr = memblock_alloc_from_nopanic(size, align, goal);
+		ptr = memblock_alloc_from(size, align, goal);
 		pr_info("cpu %d has no node %d or node-local memory\n",
 			cpu, node);
 		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
 			 cpu, size, __pa(ptr));
 	} else {
-		ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
-						     MEMBLOCK_ALLOC_ACCESSIBLE,
-						     node);
+		ptr = memblock_alloc_try_nid(size, align, goal,
+					     MEMBLOCK_ALLOC_ACCESSIBLE,
+					     node);

 		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
 			 cpu, size, node, __pa(ptr));
 	}
 	return ptr;
 #else
-	return memblock_alloc_from_nopanic(size, align, goal);
+	return memblock_alloc_from(size, align, goal);
 #endif
 }
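This is the other half of the series: with the _nopanic variants deleted, the plain allocators themselves return NULL, and a caller like pcpu_alloc_bootmem() that can tolerate failure simply passes the NULL up instead of switching to a special variant. Sketch of that tolerant call chain (both functions below are illustrative stand-ins):

	#include <stdlib.h>

	/* Stand-in for memblock_alloc_from(): may return NULL, no panic. */
	static void *mock_alloc_from(size_t size, size_t align, size_t goal)
	{
		void *p = NULL;

		(void)goal;
		return posix_memalign(&p, align, size) ? NULL : p;
	}

	/* No check here on purpose: the percpu setup code copes with NULL. */
	static void *pcpu_alloc_bootmem(size_t size, size_t align)
	{
		return mock_alloc_from(size, align, 0);
	}

	int main(void)
	{
		void *p = pcpu_alloc_bootmem(4096, 4096);

		free(p);	/* free(NULL) is a no-op, like the tolerant caller */
		return 0;
	}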
@@ -24,14 +24,16 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];

 static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

-static __init void *early_alloc(size_t size, int nid, bool panic)
+static __init void *early_alloc(size_t size, int nid, bool should_panic)
 {
-	if (panic)
-		return memblock_alloc_try_nid(size, size,
-			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-	else
-		return memblock_alloc_try_nid_nopanic(size, size,
+	void *ptr = memblock_alloc_try_nid(size, size,
 			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);

+	if (!ptr && should_panic)
+		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
+		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
+
+	return ptr;
 }

 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
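The kasan hunk folds two allocator calls into one and renames the flag from `panic` to `should_panic` — with the old name the parameter shadowed the panic() function the new code needs to call. Minimal sketch of the resulting shape (early_alloc() here is a userspace stand-in):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* One allocation path; the caller chooses whether failure is fatal. */
	static void *early_alloc(size_t size, int nid, bool should_panic)
	{
		void *ptr = calloc(1, size);	/* alloc_try_nid() stand-in */

		if (!ptr && should_panic) {
			fprintf(stderr, "Failed to allocate %zu bytes, nid=%d\n",
				size, nid);
			exit(1);
		}
		return ptr;			/* may be NULL when !should_panic */
	}

	int main(void)
	{
		free(early_alloc(4096, 0, true));
		return 0;
	}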
@@ -195,15 +195,11 @@ static void __init alloc_node_data(int nid)
 	 * Allocate node data.  Try node-local memory and then any node.
 	 * Never allocate in DMA zone.
 	 */
-	nd_pa = memblock_phys_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
 	if (!nd_pa) {
-		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
-					      MEMBLOCK_ALLOC_ACCESSIBLE);
-		if (!nd_pa) {
-			pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
-			       nd_size, nid);
-			return;
-		}
+		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
+		       nd_size, nid);
+		return;
 	}
 	nd = __va(nd_pa);
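memblock_phys_alloc_try_nid() already falls back from the requested node to any node, so alloc_node_data()'s hand-rolled two-step retry collapses into a single call with a single error path. A toy model of the node-then-anywhere fallback (both helpers are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t phys_alloc_nid(size_t size, int nid)
	{
		(void)size;
		return nid == 0 ? 0x1000 : 0;	/* only node 0 has memory */
	}

	/* Fallback lives in the allocator, not in every caller. */
	static uint64_t phys_alloc_try_nid(size_t size, int nid)
	{
		uint64_t pa = phys_alloc_nid(size, nid);

		for (int n = 0; !pa && n < 4; n++)	/* then try any node */
			pa = phys_alloc_nid(size, n);
		return pa;
	}

	int main(void)
	{
		printf("node 2 request -> 0x%llx\n",
		       (unsigned long long)phys_alloc_try_nid(64, 2));
		return 0;
	}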
@@ -141,6 +141,9 @@ void * __init prom_early_alloc(unsigned long size)
 		 * wasted bootmem) and hand off chunks of it to callers.
 		 */
 		res = memblock_alloc(chunk_size, SMP_CACHE_BYTES);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      chunk_size);
 		BUG_ON(!res);
 		prom_early_allocated += chunk_size;
 		memset(res, 0, chunk_size);
@@ -181,8 +181,15 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)

 static void * __ref alloc_p2m_page(void)
 {
-	if (unlikely(!slab_is_available()))
-		return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (unlikely(!slab_is_available())) {
+		void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+
+		if (!ptr)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+
+		return ptr;
+	}

 	return (void *)__get_free_page(GFP_KERNEL);
 }
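alloc_p2m_page() chooses its allocator by boot phase: memblock before the slab allocator is up, the page allocator afterwards. Only the early path gains a panic, since nothing can recover from running out of memory that early, while later callers already handle a NULL page. A toy rendering of the dual-path allocator (slab_up and both allocations are stand-ins):

	#include <stdio.h>
	#include <stdlib.h>

	static int slab_up;	/* stands in for slab_is_available() */

	static void *alloc_p2m_page(void)
	{
		if (!slab_up) {
			void *ptr = calloc(1, 4096);	/* memblock_alloc() stand-in */

			if (!ptr) {
				fprintf(stderr, "%s: Failed to allocate early page\n",
					__func__);
				exit(1);	/* boot-time failure is fatal */
			}
			return ptr;
		}
		return malloc(4096);	/* __get_free_page() stand-in; callers check */
	}

	int main(void)
	{
		void *early = alloc_p2m_page();

		slab_up = 1;
		free(alloc_p2m_page());
		free(early);
		return 0;
	}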
Some files were not shown because too many files have changed in this diff.