	cpumask: change cpumask_of_cpu_ptr to use new cpumask_of_cpu
* Replace previous instances of the cpumask_of_cpu_ptr* macros
    with the new (lvalue capable) generic cpumask_of_cpu().
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
			
			
This commit is contained in:
parent 6524d938b3
commit 0bc3cc03fa
					 17 changed files with 37 additions and 83 deletions
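The pattern being removed needed a declaration plus an update call before a
mask pointer could be used; the new generic cpumask_of_cpu() yields an lvalue,
so callers take its address inline. A minimal before/after sketch, built from
the call sites below; the map-based definition at the end is an assumption
about how the lvalue is provided (following the parent commit's generic
cpumask_of_cpu_map), not a verbatim quote of the header:

	/* old: three-step pointer dance at every call site */
	cpumask_of_cpu_ptr_declare(new_mask);		/* declare the pointer   */
	cpumask_of_cpu_ptr_next(new_mask, cpu);		/* point it at cpu's mask */
	set_cpus_allowed_ptr(current, new_mask);

	/* new: single expression, address taken directly */
	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));

	/* assumed shape of the lvalue-capable macro: indexing an array
	 * yields an lvalue, so &cpumask_of_cpu(cpu) is well-formed C */
	extern cpumask_t cpumask_of_cpu_map[];
	#define cpumask_of_cpu(cpu)	(cpumask_of_cpu_map[cpu])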
				
			
@@ -73,7 +73,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(new_mask, cpu);
 	int retval;
 	unsigned int eax, ebx, ecx, edx;
 	unsigned int edx_part;

@@ -92,7 +91,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 
 	/* Make sure we are running on right CPU */
 	saved_mask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, new_mask);
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		return -1;
 
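Most call sites in this patch follow the same affinity idiom: save the current
mask, pin the task to the target CPU, verify the migration happened, do the
per-CPU work, then restore. A condensed sketch of that recurring pattern,
using the function and variable names seen in the hunks below:

	cpumask_t saved_mask = current->cpus_allowed;	/* remember affinity */

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));	/* pin to cpu */
	if (smp_processor_id() != cpu)
		return -1;			/* migration did not happen */

	/* ... access per-CPU state (MSRs, microcode, throttling, ...) ... */

	set_cpus_allowed_ptr(current, &saved_mask);	/* restore affinity */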
@@ -200,12 +200,10 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
 	cpumask_t saved_mask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_declare(cpu_mask);
 	unsigned int i;
 
 	for_each_cpu_mask_nr(i, cmd->mask) {
-		cpumask_of_cpu_ptr_next(cpu_mask, i);
-		set_cpus_allowed_ptr(current, cpu_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		do_drv_write(cmd);
 	}
 

@@ -269,12 +267,11 @@ static unsigned int get_measured_perf(unsigned int cpu)
 	} aperf_cur, mperf_cur;
 
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	unsigned int perf_percent;
 	unsigned int retval;
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (get_cpu() != cpu) {
 		/* We were not able to run on requested processor */
 		put_cpu();

@@ -340,7 +337,6 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
 	unsigned int freq;
 	unsigned int cached_freq;

@@ -353,7 +349,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	}
 
 	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(cpu_mask), data);
+	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
@@ -479,12 +479,11 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 static int check_supported_cpu(unsigned int cpu)
 {
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr(cpu_mask, cpu);
 	u32 eax, ebx, ecx, edx;
 	unsigned int rc = 0;
 
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);

@@ -1017,7 +1016,6 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;

@@ -1032,7 +1030,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpu_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);

@@ -1107,7 +1105,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask;
-	cpumask_of_cpu_ptr_declare(newmask);
 	int rc;
 
 	if (!cpu_online(pol->cpu))

@@ -1159,8 +1156,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_next(newmask, pol->cpu);
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);

@@ -1182,7 +1178,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	set_cpus_allowed_ptr(current, &oldmask);
 
 	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = *newmask;
+		pol->cpus = cpumask_of_cpu(pol->cpu);
 	else
 		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
 	data->available_cores = &(pol->cpus);

@@ -1248,7 +1244,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask = current->cpus_allowed;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	unsigned int khz = 0;
 	unsigned int first;
 

@@ -1258,7 +1253,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (!data)
 		return -EINVAL;
 
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX
 			"limiting to CPU %d failed in powernowk8_get\n", cpu);
@@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
 	unsigned l, h;
 	unsigned clock_freq;
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr(new_mask, cpu);
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu)
 		return 0;
 

@@ -585,15 +584,12 @@ static int centrino_target (struct cpufreq_policy *policy,
 		 * Best effort undo..
 		 */
 
-		if (!cpus_empty(*covered_cpus)) {
-			cpumask_of_cpu_ptr_declare(new_mask);
-
+		if (!cpus_empty(*covered_cpus))
 			for_each_cpu_mask_nr(j, *covered_cpus) {
-				cpumask_of_cpu_ptr_next(new_mask, j);
-				set_cpus_allowed_ptr(current, new_mask);
+				set_cpus_allowed_ptr(current,
+						     &cpumask_of_cpu(j));
 				wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 			}
-		}
 
 		tmp = freqs.new;
 		freqs.new = freqs.old;

@@ -244,8 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	cpumask_of_cpu_ptr(newmask, cpu);
-	return _speedstep_get(newmask);
+	return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 
 /**
@@ -516,7 +516,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	unsigned long		j;
 	int			retval;
 	cpumask_t		oldmask;
-	cpumask_of_cpu_ptr(newmask, cpu);
 
 	if (num_cache_leaves == 0)
 		return -ENOENT;

@@ -527,7 +526,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		return -ENOMEM;
 
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, newmask);
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		goto out;
 
@@ -62,12 +62,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 
 	if (reload) {
 #ifdef CONFIG_SMP
-		cpumask_of_cpu_ptr_declare(mask);
-
 		preempt_disable();
 		load_LDT(pc);
-		cpumask_of_cpu_ptr_next(mask, smp_processor_id());
-		if (!cpus_equal(current->mm->cpu_vm_mask, *mask))
+		if (!cpus_equal(current->mm->cpu_vm_mask,
+				cpumask_of_cpu(smp_processor_id())))
 			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else
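For readability, the reworked LDT reload path above condenses to the
following (the same code as the + lines, with comments added; assumes
CONFIG_SMP):

	preempt_disable();
	load_LDT(pc);			/* install the new LDT on this CPU */
	/* if any other CPU still has this mm loaded, its stale LDT must
	 * be flushed via a cross-call */
	if (!cpus_equal(current->mm->cpu_vm_mask,
			cpumask_of_cpu(smp_processor_id())))
		smp_call_function(flush_ldt, current->mm, 1);
	preempt_enable();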
@@ -388,7 +388,6 @@ static int do_microcode_update (void)
 	void *new_mc = NULL;
 	int cpu;
 	cpumask_t old;
-	cpumask_of_cpu_ptr_declare(newmask);
 
 	old = current->cpus_allowed;
 

@@ -405,8 +404,7 @@ static int do_microcode_update (void)
 
 			if (!uci->valid)
 				continue;
-			cpumask_of_cpu_ptr_next(newmask, cpu);
-			set_cpus_allowed_ptr(current, newmask);
+			set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 			error = get_maching_microcode(new_mc, cpu);
 			if (error < 0)
 				goto out;

@@ -576,7 +574,6 @@ static int apply_microcode_check_cpu(int cpu)
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	cpumask_t old;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	unsigned int val[2];
 	int err = 0;
 

@@ -585,7 +582,7 @@ static int apply_microcode_check_cpu(int cpu)
 		return 0;
 
 	old = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
 	/* Check if the microcode we have in memory matches the CPU */
 	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||

@@ -623,12 +620,11 @@ static int apply_microcode_check_cpu(int cpu)
 static void microcode_init_cpu(int cpu, int resume)
 {
 	cpumask_t old;
-	cpumask_of_cpu_ptr(newmask, cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 	old = current->cpus_allowed;
 
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
 	if (uci->valid && system_state == SYSTEM_RUNNING && !resume)

@@ -661,13 +657,10 @@ static ssize_t reload_store(struct sys_device *dev,
 	if (end == buf)
 		return -EINVAL;
 	if (val == 1) {
-		cpumask_t old;
-		cpumask_of_cpu_ptr(newmask, cpu);
-
-		old = current->cpus_allowed;
+		cpumask_t old = current->cpus_allowed;
 
 		get_online_cpus();
-		set_cpus_allowed_ptr(current, newmask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
 		mutex_lock(&microcode_mutex);
 		if (uci->valid)
@@ -414,25 +414,20 @@ void native_machine_shutdown(void)
 
 	/* The boot cpu is always logical cpu 0 */
 	int reboot_cpu_id = 0;
-	cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
 
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
 	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-		cpu_online(reboot_cpu)) {
+		cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
-		cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-	}
 #endif
 
 	/* Make certain the cpu I'm about to reboot on is online */
-	if (!cpu_online(reboot_cpu_id)) {
+	if (!cpu_online(reboot_cpu_id))
 		reboot_cpu_id = smp_processor_id();
-		cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-	}
 
 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed_ptr(current, newmask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
 
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
@@ -827,7 +827,6 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret;
 
 	if (!pr)

@@ -839,8 +838,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 	 * Migrate task to the cpu pointed by pr.
 	 */
 	saved_mask = current->cpus_allowed;
-	cpumask_of_cpu_ptr_next(new_mask, pr->id);
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
 	set_cpus_allowed_ptr(current, &saved_mask);

@@ -989,7 +987,6 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
 	cpumask_t saved_mask;
-	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;

@@ -1028,8 +1025,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		cpumask_of_cpu_ptr_next(new_mask, pr->id);
-		set_cpus_allowed_ptr(current, new_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 				t_state.target_state);
 	} else {

@@ -1060,8 +1056,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 				continue;
 			}
 			t_state.cpu = i;
-			cpumask_of_cpu_ptr_next(new_mask, i);
-			set_cpus_allowed_ptr(current, new_mask);
+			set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
 				match_pr, t_state.target_state);
@@ -245,7 +245,6 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
 static int smi_request(struct smi_cmd *smi_cmd)
 {
 	cpumask_t old_mask;
-	cpumask_of_cpu_ptr(new_mask, 0);
 	int ret = 0;
 
 	if (smi_cmd->magic != SMI_CMD_MAGIC) {

@@ -256,7 +255,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
 
 	/* SMI requires CPU 0 */
 	old_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, new_mask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
 	if (smp_processor_id() != 0) {
 		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
 			__func__);

@@ -229,11 +229,10 @@ xpc_hb_checker(void *ignore)
 	int last_IRQ_count = 0;
 	int new_IRQ_count;
 	int force_IRQ = 0;
-	cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
 
 	/* this thread was marked active by xpc_hb_init() */
 
-	set_cpus_allowed_ptr(current, cpumask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
 
 	/* set our heartbeating to other partitions into motion */
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
@@ -33,9 +33,8 @@ static int stopmachine(void *cpu)
 {
 	int irqs_disabled = 0;
 	int prepared = 0;
-	cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
 
-	set_cpus_allowed_ptr(current, cpumask);
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
 
 	/* Ack: we are alive */
 	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
@@ -196,12 +196,10 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	struct tick_device *td;
 	int cpu, ret = NOTIFY_OK;
 	unsigned long flags;
-	cpumask_of_cpu_ptr_declare(cpumask);
 
 	spin_lock_irqsave(&tick_device_lock, flags);
 
 	cpu = smp_processor_id();
-	cpumask_of_cpu_ptr_next(cpumask, cpu);
 	if (!cpu_isset(cpu, newdev->cpumask))
 		goto out_bc;
 

@@ -209,7 +207,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 	curdev = td->evtdev;
 
 	/* cpu local device ? */
-	if (!cpus_equal(newdev->cpumask, *cpumask)) {
+	if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
 
 		/*
 		 * If the cpu affinity of the device interrupt can not

@@ -222,7 +220,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 		 * If we have a cpu local device already, do not replace it
 		 * by a non cpu local device
 		 */
-		if (curdev && cpus_equal(curdev->cpumask, *cpumask))
+		if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
 			goto out_bc;
 	}
 

@@ -254,7 +252,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
 		curdev = NULL;
 	}
 	clockevents_exchange_device(curdev, newdev);
-	tick_setup_device(td, newdev, cpu, cpumask);
+	tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
 	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
 		tick_oneshot_notify();
 
@@ -213,9 +213,7 @@ static void start_stack_timers(void)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		cpumask_of_cpu_ptr(new_mask, cpu);
-
-		set_cpus_allowed_ptr(current, new_mask);
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		start_stack_timer(cpu);
 	}
 	set_cpus_allowed_ptr(current, &saved_mask);
@@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void)
 {
 	unsigned long preempt_count = preempt_count();
 	int this_cpu = raw_smp_processor_id();
-	cpumask_of_cpu_ptr_declare(this_mask);
 
 	if (likely(preempt_count))
 		goto out;

@@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	cpumask_of_cpu_ptr_next(this_mask, this_cpu);
-
-	if (cpus_equal(current->cpus_allowed, *this_mask))
+	if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
 		goto out;
 
 	/*
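The check above relies on a simple invariant: if a task's allowed mask equals
the single-CPU mask for the CPU it is currently on, the scheduler cannot
migrate it, so reading the CPU number from preemptible context is stable.
Restated with comments (same logic as the + lines):

	/* bound to exactly this CPU? then the CPU number cannot change
	 * under us and the raw read is trustworthy */
	if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
		goto out;	/* skip the preemptible-context warning */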
@@ -310,8 +310,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		cpumask_of_cpu_ptr(cpumask, node);
-		set_cpus_allowed_ptr(task, cpumask);
+		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
 		break;
 	}
 	case SVC_POOL_PERNODE: