mirror of https://github.com/torvalds/linux.git

commit 6a385db5ce
Merge branch 'core/percpu' into x86/core

Conflicts:
	kernel/irq/handle.c

168 changed files with 2315 additions and 2348 deletions

@@ -18,11 +18,11 @@ For an architecture to support this feature, it must define some of
 these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
-#define topology_thread_siblings(cpu)
-#define topology_core_siblings(cpu)
+#define topology_thread_cpumask(cpu)
+#define topology_core_cpumask(cpu)
 
 The type of **_id is int.
-The type of siblings is cpumask_t.
+The type of siblings is (const) struct cpumask *.
 
 To be consistent on all architectures, include/linux/topology.h
 provides default definitions for any of the above macros that are

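For illustration (not part of the patch): a minimal sketch of how a caller might consume the renamed topology macros, assuming, as the documentation above states, that they now return a borrowed const struct cpumask * rather than a cpumask_t value.

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Count the CPUs sharing a core with @cpu (sketch only). */
	static int count_core_siblings(int cpu)
	{
		const struct cpumask *mask = topology_core_cpumask(cpu);

		/* The mask is borrowed, not copied: do not modify it. */
		return cpumask_weight(mask);
	}
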
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }

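This hunk shows the pattern that recurs throughout the merge: once irq_desc[].affinity can be an off-stack cpumask_var_t (a pointer under CONFIG_CPUMASK_OFFSTACK), plain struct assignment from cpumask_of_cpu() no longer works, and the bitmap must be copied explicitly. A hedged sketch of the two shapes (field layouts simplified, names hypothetical):

	#include <linux/cpumask.h>

	struct old_desc { cpumask_t affinity; };	/* mask embedded in the struct */
	struct new_desc { cpumask_var_t affinity; };	/* pointer when OFFSTACK=y */

	static void set_affinity_sketch(struct new_desc *desc, int cpu)
	{
		/* Assignment would copy a pointer, not the bits;
		 * cpumask_copy() copies the bitmap itself and works
		 * whether the mask is embedded or allocated. */
		cpumask_copy(desc->affinity, cpumask_of(cpu));
	}
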
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
 	.lock = SPIN_LOCK_UNLOCKED
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their

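The #error above is a compile-time guard: this file declares bad_irq_desc statically and never allocates its masks, which is only valid while cpumask_var_t needs no allocation. A hedged sketch (helper name hypothetical) of the alloc_cpumask_var() call such code would otherwise require:

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static int init_desc_affinity(cpumask_var_t *affinity)
	{
		/* With CONFIG_CPUMASK_OFFSTACK=y this allocates the bitmap;
		 * with it disabled it is a no-op that always succeeds. */
		if (!alloc_cpumask_var(affinity, GFP_KERNEL))
			return -ENOMEM;
		cpumask_setall(*affinity);
		return 0;
	}
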
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
-	bad_irq_desc.affinity = CPU_MASK_ALL;
+	cpumask_setall(bad_irq_desc.affinity);
 	bad_irq_desc.cpu = smp_processor_id();
 #endif
 	init_arch_irq();

@@ -191,15 +196,16 @@ void migrate_irqs(void)
 		struct irq_desc *desc = irq_desc + i;
 
 		if (desc->cpu == cpu) {
-			unsigned int newcpu = any_online_cpu(desc->affinity);
-
-			if (newcpu == NR_CPUS) {
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
 				if (printk_ratelimit())
 					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
 					       i, cpu);
 
-				cpus_setall(desc->affinity);
-				newcpu = any_online_cpu(desc->affinity);
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
 			}
 
 			route_irq(desc, i, newcpu);

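Note the sentinel change in this hunk: any_online_cpu() signalled "no CPU" by returning NR_CPUS, while cpumask_any_and() returns a value >= nr_cpu_ids when the intersection is empty. A hedged sketch (helper name hypothetical) of the widen-and-retry idiom the code uses:

	#include <linux/cpumask.h>

	static unsigned int pick_target_cpu(struct cpumask *affinity)
	{
		unsigned int cpu = cpumask_any_and(affinity, cpu_online_mask);

		if (cpu >= nr_cpu_ids) {		/* no online CPU in the mask */
			cpumask_setall(affinity);	/* widen to all CPUs ... */
			cpu = cpumask_any_and(affinity, cpu_online_mask); /* retry */
		}
		return cpu;
	}
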
@@ -65,6 +65,7 @@ SECTIONS
 #endif
 		. = ALIGN(4096);
 		__per_cpu_start = .;
+			*(.data.percpu.page_aligned)
 			*(.data.percpu)
 			*(.data.percpu.shared_aligned)
 		__per_cpu_end = .;

@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
 	const struct cpumask *mask = cpumask_of(cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->affinity = *mask;
+	cpumask_copy(desc->affinity, mask);
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }

@@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;

@@ -84,7 +84,7 @@ void build_cpu_to_node_map(void);
 	.child			= NULL,			\
 	.groups			= NULL,			\
 	.min_interval		= 8,			\
-	.max_interval		= 8*(min(num_online_cpus(), 32)), \
+	.max_interval		= 8*(min(num_online_cpus(), 32U)), \
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 2,			\

@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpus_setall(idesc->affinity);
+		cpumask_setall(idesc->affinity);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;

@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(&irq_desc[irq].affinity,
+		cpumask_copy(irq_desc[irq].affinity,
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}

@@ -148,7 +148,7 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
 		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing

@@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		struct irq_desc *desc = irq_to_desc(vector);
+
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		} else if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		else {
 			int irq = local_vector_to_irq(vector);
 

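kstat_this_cpu.irqs[] indexes a fixed per-CPU array, which cannot survive sparse, allocated-on-demand IRQ descriptors; kstat_incr_irqs_this_cpu() bumps the counter attached to the descriptor instead, which is why these hunks first look the descriptor up. A hedged sketch (helper name hypothetical) of the call pattern:

	#include <linux/irq.h>
	#include <linux/kernel_stat.h>

	static void account_irq_sketch(unsigned int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);

		/* Replaces the old kstat_this_cpu.irqs[irq]++ */
		kstat_incr_irqs_this_cpu(irq, desc);
	}
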
@@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
 	  * Perform normal interrupt style processing
 	  */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		struct irq_desc *desc = irq_to_desc(vector);
+
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		} else if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 			int irq = local_vector_to_irq(vector);

@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 	msg.data = data;
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
 

@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = *mask;
+	cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
 

@@ -219,6 +219,7 @@ SECTIONS
   .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
 	{
 		__per_cpu_start = .;
+		*(.data.percpu.page_aligned)
 		*(.data.percpu)
 		*(.data.percpu.shared_aligned)
 		__per_cpu_end = .;

@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = *cpu_mask;
+	cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
 

@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)						\
 do {									\
-    if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {	\
+    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
 	smtc_forward_irq(irq);						\
 	irq_exit();							\
 	return;								\

@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
 	}
-	irq_desc[irq].affinity = *cpumask;
+	cpumask_copy(irq_desc[irq].affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);
 
 }

@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
 	 * and efficiency, we just pick the easiest one to find.
 	 */
 
-	target = first_cpu(irq_desc[irq].affinity);
+	target = cpumask_first(irq_desc[irq].affinity);
 
 	/*
 	 * We depend on the platform code to have correctly processed

@@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
 	struct clock_event_device *cd;
 	void *arg_copy = pipi->arg;
 	int type_copy = pipi->type;
+	int irq = MIPS_CPU_IRQ_BASE + 1;
+
 	smtc_ipi_nq(&freeIPIq, pipi);
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
 		irq_enter();
-		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+		kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 		irq_exit();

@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
 
 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-	cpumask_t tmask = *affinity;
+	cpumask_t tmask;
 	int cpu = 0;
 	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 

@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 	 * be made to forward to an offline "CPU".
 	 */
 
+	cpumask_copy(&tmask, affinity);
 	for_each_cpu(cpu, affinity) {
 		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
 			cpu_clear(cpu, tmask);
 	}
-	irq_desc[irq].affinity = tmask;
+	cpumask_copy(irq_desc[irq].affinity, &tmask);
 
 	if (cpus_empty(tmask))
 		/*

@@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
 	int irq = SGI_BUSERR_IRQ;
 
 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	ip22_be_interrupt(irq);
 	irq_exit();
 }

@@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
 	char c;
 
 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
 	ArcRead(0, &c, 1, &cnt);
 	ArcEnterInteractiveMode();

@@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
 void bcm1480_mailbox_interrupt(void)
 {
 	int cpu = smp_processor_id();
+	int irq = K_BCM1480_INT_MBOX_0_0;
 	unsigned int action;
 
-	kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	/* Load the mailbox register to figure out what we're supposed to do */
 	action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
 

@@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
 void sb1250_mailbox_interrupt(void)
 {
 	int cpu = smp_processor_id();
+	int irq = K_INT_MBOX_0;
 	unsigned int action;
 
-	kstat_this_cpu.irqs[K_INT_MBOX_0]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	/* Load the mailbox register to figure out what we're supposed to do */
 	action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
 

@@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 	 * the stack NMI-atomically, it's safe to use smp_processor_id().
 	 */
 	int sum, cpu = smp_processor_id();
+	int irq = NMIIRQ;
 	u8 wdt, tmp;
 
 	wdt = WDCTR & ~WDCTR_WDCNE;

@@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 	NMICR = NMICR_WDIF;
 
 	nmi_count(cpu)++;
-	kstat_this_cpu.irqs[NMIIRQ]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	sum = irq_stat[cpu].__irq_count;
 
 	if (last_irq_sums[cpu] == sum) {

@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision.  The mask has already
 		 * been set; we must reset it */
-		irq_desc[irq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[irq].affinity);
 		return -EINVAL;
 	}
 

@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 	if (cpu_check_affinity(irq, dest))
 		return;
 
-	irq_desc[irq].affinity = *dest;
+	cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
 

@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;

@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	dest = irq_desc[irq].affinity;
+	cpumask_copy(&dest, irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);

@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
 		if (irq_desc[irq].status & IRQ_PER_CPU)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpumask_and(&mask, irq_desc[irq].affinity, &map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;

@@ -184,6 +184,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
 		__per_cpu_start = .;
+		*(.data.percpu.page_aligned)
 		*(.data.percpu)
 		*(.data.percpu.shared_aligned)
 		__per_cpu_end = .;

@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
 	int server;
 	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t cpumask = irq_desc[virq].affinity;
+	cpumask_t cpumask;
 	cpumask_t tmp = CPU_MASK_NONE;
 
+	cpumask_copy(&cpumask, irq_desc[virq].affinity);
 	if (!distribute_irqs)
 		return default_server;
 

@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
 		       virq, cpu);
 
 		/* Reset affinity to all cpus */
-		irq_desc[virq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[virq].affinity);
 		desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
 		spin_unlock_irqrestore(&desc->lock, flags);

@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;
 
+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);

@@ -247,9 +247,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;
 
+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);

@@ -854,7 +855,7 @@ void fixup_irqs(void)
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
-					&irq_desc[irq].affinity);
+					irq_desc[irq].affinity);
 		}
 		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}

@@ -729,7 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
 
 	irq_enter();
 
-	kstat_this_cpu.irqs[0]++;
+	kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
 	if (unlikely(!evt->event_handler)) {
 		printk(KERN_WARNING

@@ -133,7 +133,7 @@ config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
 config HAVE_SETUP_PER_CPU_AREA
-	def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER)
+	def_bool y
 
 config HAVE_CPUMASK_OF_CPU_MAP
 	def_bool X86_64_SMP

@@ -391,6 +391,13 @@ config X86_RDC321X
 	  as R-8610-(G).
 	  If you don't have one of these chips, you should say N here.
 
+config X86_UV
+	bool "SGI Ultraviolet"
+	depends on X86_64
+	help
+	  This option is needed in order to support SGI Ultraviolet systems.
+	  If you don't have one of these, you should say N here.
+
 config SCHED_OMIT_FRAME_POINTER
 	def_bool y
 	prompt "Single-depth WCHAN output"

@@ -1340,13 +1347,17 @@ config SECCOMP
 
 	  If unsure, say Y. Only embedded should say N here.
 
+config CC_STACKPROTECTOR_ALL
+	bool
+
 config CC_STACKPROTECTOR
 	bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
-	depends on X86_64 && EXPERIMENTAL && BROKEN
+	depends on X86_64
+	select CC_STACKPROTECTOR_ALL
 	help
           This option turns on the -fstack-protector GCC feature. This
-	  feature puts, at the beginning of critical functions, a canary
-	  value on the stack just before the return address, and validates
+	  feature puts, at the beginning of functions, a canary value on
+	  the stack just before the return address, and validates
 	  the value just before actually returning.  Stack based buffer
 	  overflows (that need to overwrite this return address) now also
 	  overwrite the canary, which gets detected and the attack is then

@@ -1354,15 +1365,8 @@ config CC_STACKPROTECTOR
 
 	  This feature requires gcc version 4.2 or above, or a distribution
 	  gcc with the feature backported. Older versions are automatically
-	  detected and for those versions, this configuration option is ignored.
-
-config CC_STACKPROTECTOR_ALL
-	bool "Use stack-protector for all functions"
-	depends on CC_STACKPROTECTOR
-	help
-	  Normally, GCC only inserts the canary value protection for
-	  functions that use large-ish on-stack buffers. By enabling
-	  this option, GCC will be asked to do this for ALL functions.
+	  detected and for those versions, this configuration option is
+	  ignored. (and a warning is printed during bootup)
 
 source kernel/Kconfig.hz
 

@@ -292,25 +292,23 @@ config X86_CPU
 # Define implied options from the CPU selection here
 config X86_L1_CACHE_BYTES
 	int
-	default "128" if GENERIC_CPU || MPSC
-	default "64" if MK8 || MCORE2
-	depends on X86_64
+	default "128" if MPSC
+	default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
 
 config X86_INTERNODE_CACHE_BYTES
 	int
 	default "4096" if X86_VSMP
 	default X86_L1_CACHE_BYTES if !X86_VSMP
-	depends on X86_64
 
 config X86_CMPXCHG
 	def_bool X86_64 || (X86_32 && !M386)
 
 config X86_L1_CACHE_SHIFT
 	int
-	default "7" if MPENTIUM4 || X86_GENERIC || GENERIC_CPU || MPSC
+	default "7" if MPENTIUM4 || MPSC
 	default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7
+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
 
 config X86_XADD
 	def_bool y

@@ -117,6 +117,7 @@ config DEBUG_RODATA
 config DEBUG_RODATA_TEST
 	bool "Testcase for the DEBUG_RODATA feature"
 	depends on DEBUG_RODATA
+	default y
 	help
 	  This option enables a testcase for the DEBUG_RODATA
 	  feature as well as for the change_page_attr() infrastructure.

@@ -73,7 +73,7 @@ else
 
         stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
         stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
-                "$(CC)" -fstack-protector )
+                "$(CC)" "-fstack-protector -DGCC_HAS_SP" )
        stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
                 "$(CC)" -fstack-protector-all )
 

@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
 	CFI_DEF_CFA	rsp,0
 	CFI_REGISTER	rsp,rbp
 	SWAPGS_UNSAFE_STACK
-	movq	%gs:pda_kernelstack, %rsp
-	addq	$(PDA_STACKOFFSET),%rsp
+	movq	PER_CPU_VAR(kernel_stack), %rsp
+	addq	$(KERNEL_STACK_OFFSET),%rsp
 	/*
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs, here we enable it straight after entry:

@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
 ENTRY(ia32_cstar_target)
 	CFI_STARTPROC32	simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
+	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
 	CFI_REGISTER	rip,rcx
 	/*CFI_REGISTER	rflags,r11*/
 	SWAPGS_UNSAFE_STACK
 	movl	%esp,%r8d
 	CFI_REGISTER	rsp,r8
-	movq	%gs:pda_kernelstack,%rsp
+	movq	PER_CPU_VAR(kernel_stack),%rsp
 	/*
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:

arch/x86/include/asm/apicnum.h (new file, 12 lines)

@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_APICNUM_H
+#define _ASM_X86_APICNUM_H
+
+/* define MAX_IO_APICS */
+#ifdef CONFIG_X86_32
+# define MAX_IO_APICS 64
+#else
+# define MAX_IO_APICS 128
+# define MAX_LOCAL_APIC 32768
+#endif
+
+#endif /* _ASM_X86_APICNUM_H */

@@ -7,6 +7,20 @@
 #include <linux/nodemask.h>
 #include <linux/percpu.h>
 
+#ifdef CONFIG_SMP
+
+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu)			boot_cpu_physical_apicid
+#define safe_smp_processor_id()			0
+#define stack_smp_processor_id()		0
+
+#endif /* CONFIG_SMP */
+
 struct x86_cpu {
 	struct cpu cpu;
 };

@@ -17,4 +31,11 @@ extern void arch_unregister_cpu(int);
 #endif
 
 DECLARE_PER_CPU(int, cpu_state);
+
+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id				0
+#endif
+
 #endif /* _ASM_X86_CPU_H */

arch/x86/include/asm/cpumask.h (new file, 32 lines)

@@ -0,0 +1,32 @@
+#ifndef _ASM_X86_CPUMASK_H
+#define _ASM_X86_CPUMASK_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+extern void setup_cpu_local_masks(void);
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
+
+static inline void setup_cpu_local_masks(void) { }
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_CPUMASK_H */

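The new header gives 64-bit code real cpumask_var_t objects (allocatable under CONFIG_CPUMASK_OFFSTACK) while 32-bit keeps its plain cpumask_t maps, casting them so both configurations export the same struct cpumask * names. A hedged sketch (helper name hypothetical) of a caller that stays oblivious to the difference:

	#include <linux/cpumask.h>
	#include <asm/cpumask.h>

	static bool cpu_has_called_in(int cpu)
	{
		/* cpu_callin_mask is a cpumask_var_t on 64-bit and a cast
		 * of the old cpumask_t map on 32-bit; callers cannot tell. */
		return cpumask_test_cpu(cpu, cpu_callin_mask);
	}
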
@@ -1,39 +1,21 @@
 #ifndef _ASM_X86_CURRENT_H
 #define _ASM_X86_CURRENT_H
 
-#ifdef CONFIG_X86_32
 #include <linux/compiler.h>
 #include <asm/percpu.h>
 
+#ifndef __ASSEMBLY__
 struct task_struct;
 
 DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
-	return x86_read_percpu(current_task);
-}
-
-#else /* X86_32 */
-
-#ifndef __ASSEMBLY__
-#include <asm/pda.h>
-
-struct task_struct;
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return read_pda(pcurrent);
+	return percpu_read(current_task);
 }
 
-#else /* __ASSEMBLY__ */
-
-#include <asm/asm-offsets.h>
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* X86_32 */
-
 #define current get_current()
 
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_X86_CURRENT_H */

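After this hunk, both 32-bit and 64-bit resolve current through the same per-CPU variable; the 64-bit PDA field and the assembly-side GET_CURRENT macro are gone. A hedged usage sketch (function name hypothetical):

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <asm/current.h>

	static void show_current_sketch(void)
	{
		/* get_current() now reads the current_task per-CPU variable
		 * on both bitnesses instead of read_pda(pcurrent). */
		struct task_struct *tsk = get_current();

		printk(KERN_DEBUG "running as pid %d\n", tsk->pid);
	}
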
@@ -138,11 +138,4 @@ struct genapic {
 extern struct genapic *genapic;
 extern void es7000_update_genapic_to_cluster(void);
 
-enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
-#define get_uv_system_type()		UV_NONE
-#define is_uv_system()			0
-#define uv_wakeup_secondary(a, b)	1
-#define uv_system_init()		do {} while (0)
-
-
 #endif /* _ASM_X86_GENAPIC_32_H */

@@ -51,15 +51,9 @@ extern struct genapic apic_x2apic_phys;
 extern int acpi_madt_oem_check(char *, char *);
 
 extern void apic_send_IPI_self(int vector);
-enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
-extern enum uv_system_type get_uv_system_type(void);
-extern int is_uv_system(void);
 
 extern struct genapic apic_x2apic_uv_x;
 DECLARE_PER_CPU(int, x2apic_extra_bits);
-extern void uv_cpu_init(void);
-extern void uv_system_init(void);
-extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
 
 extern void setup_apic_routing(void);
 

@@ -1,11 +1,52 @@
-#ifdef CONFIG_X86_32
-# include "hardirq_32.h"
-#else
-# include "hardirq_64.h"
+#ifndef _ASM_X86_HARDIRQ_H
+#define _ASM_X86_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+	unsigned int __nmi_count;	/* arch dependent */
+	unsigned int irq0_irqs;
+#ifdef CONFIG_X86_LOCAL_APIC
+	unsigned int apic_timer_irqs;	/* arch dependent */
+	unsigned int irq_spurious_count;
 #endif
+#ifdef CONFIG_SMP
+	unsigned int irq_resched_count;
+	unsigned int irq_call_count;
+	unsigned int irq_tlb_count;
+#endif
+#ifdef CONFIG_X86_MCE
+	unsigned int irq_thermal_count;
+# ifdef CONFIG_X86_64
+	unsigned int irq_threshold_count;
+# endif
+#endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+
+/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
+#define MAX_HARDIRQS_PER_CPU NR_VECTORS
+
+#define __ARCH_IRQ_STAT
+
+#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)
+
+#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))
+
+extern void ack_bad_irq(unsigned int irq);
+
 extern u64 arch_irq_stat_cpu(unsigned int cpu);
 #define arch_irq_stat_cpu	arch_irq_stat_cpu
 
 extern u64 arch_irq_stat(void);
 #define arch_irq_stat		arch_irq_stat
 
+#endif /* _ASM_X86_HARDIRQ_H */

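With the unified header, inc_irq_stat() maps to percpu_add() for both 32-bit and 64-bit, replacing the 64-bit-only add_pda(). A hedged usage sketch (function name hypothetical):

	#include <asm/hardirq.h>

	static void note_resched_ipi_sketch(void)
	{
		/* One irq_cpustat_t per CPU; percpu_add() compiles to a
		 * single segment-relative add on the local CPU's copy. */
		inc_irq_stat(irq_resched_count);
	}
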
					@ -1,30 +0,0 @@
 | 
				
			||||||
#ifndef _ASM_X86_HARDIRQ_32_H
 | 
					 | 
				
			||||||
#define _ASM_X86_HARDIRQ_32_H
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
#include <linux/threads.h>
 | 
					 | 
				
			||||||
#include <linux/irq.h>
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
typedef struct {
 | 
					 | 
				
			||||||
	unsigned int __softirq_pending;
 | 
					 | 
				
			||||||
	unsigned long idle_timestamp;
 | 
					 | 
				
			||||||
	unsigned int __nmi_count;	/* arch dependent */
 | 
					 | 
				
			||||||
	unsigned int apic_timer_irqs;	/* arch dependent */
 | 
					 | 
				
			||||||
	unsigned int irq0_irqs;
 | 
					 | 
				
			||||||
	unsigned int irq_resched_count;
 | 
					 | 
				
			||||||
	unsigned int irq_call_count;
 | 
					 | 
				
			||||||
	unsigned int irq_tlb_count;
 | 
					 | 
				
			||||||
	unsigned int irq_thermal_count;
 | 
					 | 
				
			||||||
	unsigned int irq_spurious_count;
 | 
					 | 
				
			||||||
} ____cacheline_aligned irq_cpustat_t;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
#define __ARCH_IRQ_STAT
 | 
					 | 
				
			||||||
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
#define inc_irq_stat(member)	(__get_cpu_var(irq_stat).member++)
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
void ack_bad_irq(unsigned int irq);
 | 
					 | 
				
			||||||
#include <linux/irq_cpustat.h>
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
#endif /* _ASM_X86_HARDIRQ_32_H */
 | 
					 | 
				
			||||||
arch/x86/include/asm/hardirq_64.h (deleted)
@ -1,25 +0,0 @@
-#ifndef _ASM_X86_HARDIRQ_64_H
-#define _ASM_X86_HARDIRQ_64_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-#include <asm/pda.h>
-#include <asm/apic.h>
-
-/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
-#define MAX_HARDIRQS_PER_CPU NR_VECTORS
-
-#define __ARCH_IRQ_STAT 1
-
-#define inc_irq_stat(member)	add_pda(member, 1)
-
-#define local_softirq_pending() read_pda(__softirq_pending)
-
-#define __ARCH_SET_SOFTIRQ_PENDING 1
-
-#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
-#define or_softirq_pending(x)  or_pda(__softirq_pending, (x))
-
-extern void ack_bad_irq(unsigned int irq);
-
-#endif /* _ASM_X86_HARDIRQ_64_H */
arch/x86/include/asm/io_apic.h
@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
 extern int nr_ioapics;
 extern int nr_ioapic_registers[MAX_IO_APICS];

-/*
- * MP-BIOS irq configuration table structures:
- */
-
 #define MP_MAX_IOAPIC_PIN 127

-struct mp_config_ioapic {
-	unsigned long mp_apicaddr;
-	unsigned int mp_apicid;
-	unsigned char mp_type;
-	unsigned char mp_apicver;
-	unsigned char mp_flags;
-};
-
-struct mp_config_intsrc {
-	unsigned int mp_dstapic;
-	unsigned char mp_type;
-	unsigned char mp_irqtype;
-	unsigned short mp_irqflag;
-	unsigned char mp_srcbus;
-	unsigned char mp_srcbusirq;
-	unsigned char mp_dstirq;
-};
-
 /* I/O APIC entries */
-extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];

 /* # of MP IRQ source entries */
 extern int mp_irq_entries;

 /* MP IRQ source entries */
-extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

 /* non-0 if default (table-less) MP configuration */
 extern int mpc_default_type;
arch/x86/include/asm/irq_regs.h
@ -1,5 +1,31 @@
-#ifdef CONFIG_X86_32
-# include "irq_regs_32.h"
-#else
-# include "irq_regs_64.h"
-#endif
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack, stored in the per-cpu area.
+ *
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+#ifndef _ASM_X86_IRQ_REGS_H
+#define _ASM_X86_IRQ_REGS_H
+
+#include <asm/percpu.h>
+
+#define ARCH_HAS_OWN_IRQ_REGS
+
+DECLARE_PER_CPU(struct pt_regs *, irq_regs);
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+	return percpu_read(irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+	struct pt_regs *old_regs;
+
+	old_regs = get_irq_regs();
+	percpu_write(irq_regs, new_regs);
+
+	return old_regs;
+}
+
+#endif /* _ASM_X86_IRQ_REGS_32_H */
arch/x86/include/asm/irq_regs_32.h (deleted)
@ -1,31 +0,0 @@
-/*
- * Per-cpu current frame pointer - the location of the last exception frame on
- * the stack, stored in the per-cpu area.
- *
- * Jeremy Fitzhardinge <jeremy@goop.org>
- */
-#ifndef _ASM_X86_IRQ_REGS_32_H
-#define _ASM_X86_IRQ_REGS_32_H
-
-#include <asm/percpu.h>
-
-#define ARCH_HAS_OWN_IRQ_REGS
-
-DECLARE_PER_CPU(struct pt_regs *, irq_regs);
-
-static inline struct pt_regs *get_irq_regs(void)
-{
-	return x86_read_percpu(irq_regs);
-}
-
-static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
-{
-	struct pt_regs *old_regs;
-
-	old_regs = get_irq_regs();
-	x86_write_percpu(irq_regs, new_regs);
-
-	return old_regs;
-}
-
-#endif /* _ASM_X86_IRQ_REGS_32_H */
arch/x86/include/asm/irq_regs_64.h (deleted)
@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
arch/x86/include/asm/irq_vectors.h
@ -49,18 +49,20 @@
  *  some of the following vectors are 'rare', they are merged
  *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
  *  TLB, reschedule and local APIC vectors are performance-critical.
- *
- *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
  */
 #ifdef CONFIG_X86_32
+
 # define SPURIOUS_APIC_VECTOR		0xff
 # define ERROR_APIC_VECTOR		0xfe
-# define INVALIDATE_TLB_VECTOR		0xfd
-# define RESCHEDULE_VECTOR		0xfc
-# define CALL_FUNCTION_VECTOR		0xfb
-# define CALL_FUNCTION_SINGLE_VECTOR	0xfa
-# define THERMAL_APIC_VECTOR		0xf0
+# define RESCHEDULE_VECTOR		0xfd
+# define CALL_FUNCTION_VECTOR		0xfc
+# define CALL_FUNCTION_SINGLE_VECTOR	0xfb
+# define THERMAL_APIC_VECTOR		0xfa
+/* 0xf8 - 0xf9 : free */
+# define INVALIDATE_TLB_VECTOR_END	0xf7
+# define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f7 used for TLB flush */
+
+# define NUM_INVALIDATE_TLB_VECTORS	8
+
 #else
@ -105,6 +107,8 @@
 #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
+
+#include <asm/apicnum.h>	/* need MAX_IO_APICS */
 #ifndef CONFIG_SPARSE_IRQ
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
@ -112,11 +116,12 @@
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
 #else
-# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
-#  define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
-# else
-#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
+
+# define NR_IRQS					\
+	((8 * NR_CPUS) > (32 * MAX_IO_APICS) ?		\
+		(NR_VECTORS + (8 * NR_CPUS)) :		\
+		(NR_VECTORS + (32 * MAX_IO_APICS)))	\
+
 #endif

 #elif defined(CONFIG_X86_VOYAGER)
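With CONFIG_SPARSE_IRQ the old #if ladder collapses into a single conditional expression, so NR_IRQS is one macro instead of two branches. Worked numbers under assumed values (NR_VECTORS is 256 on x86; MAX_IO_APICS of 64 is an assumption here, not from this diff):

#include <stdio.h>

#define NR_VECTORS	256	/* x86 has 256 IDT vectors */
#define MAX_IO_APICS	64	/* assumed value for illustration */

/* same shape as the new NR_IRQS expression, parameterized on NR_CPUS */
#define NR_IRQS_FOR(nr_cpus)					\
	((8 * (nr_cpus)) > (32 * MAX_IO_APICS) ?		\
		(NR_VECTORS + (8 * (nr_cpus))) :		\
		(NR_VECTORS + (32 * MAX_IO_APICS)))

int main(void)
{
	/* small box: 32*64 = 2048 dominates -> 256 + 2048 */
	printf("NR_CPUS=8:    %d\n", NR_IRQS_FOR(8));
	/* big box: 8*4096 = 32768 dominates -> 256 + 32768 */
	printf("NR_CPUS=4096: %d\n", NR_IRQS_FOR(4096));
	return 0;
}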
arch/x86/include/asm/mach-default/entry_arch.h
@ -11,10 +11,26 @@
  */
 #ifdef CONFIG_X86_SMP
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
+
+BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
+		 smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
+		 smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
+		 smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
+		 smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
+		 smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
+		 smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
+		 smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
+		 smp_invalidate_interrupt)
 #endif

 /*
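Eight stub entry points (invalidate_interrupt0..7) now funnel into the one handler, smp_invalidate_interrupt, so concurrent TLB-flush senders can be spread across vectors 0xf0-0xf7 instead of all contending on a single vector's state. A sketch of one plausible spreading policy; the modulo-by-sender choice is an assumption for illustration, not taken from this diff:

#include <stdio.h>

#define INVALIDATE_TLB_VECTOR_START	0xf0
#define NUM_INVALIDATE_TLB_VECTORS	8

/* assumed policy: hash the sending CPU onto one of the 8 vectors so
 * up to 8 flush IPIs can be in flight without sharing per-vector state */
static int invalidate_vector(int sender_cpu)
{
	return INVALIDATE_TLB_VECTOR_START +
	       (sender_cpu % NUM_INVALIDATE_TLB_VECTORS);
}

int main(void)
{
	for (int cpu = 0; cpu < 10; cpu++)
		printf("cpu %2d -> vector 0x%02x\n", cpu, invalidate_vector(cpu));
	return 0;
}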
arch/x86/include/asm/mmu_context.h
@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);

-#ifdef CONFIG_X86_32
-# include "mmu_context_32.h"
-#else
-# include "mmu_context_64.h"
-#endif
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned cpu = smp_processor_id();
+
+	if (likely(prev != next)) {
+		/* stop flush ipis for the previous mm */
+		cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		percpu_write(cpu_tlbstate.active_mm, next);
+#endif
+		cpu_set(cpu, next->cpu_vm_mask);
+
+		/* Re-load page tables */
+		load_cr3(next->pgd);
+
+		/*
+		 * load the LDT, if the LDT is different:
+		 */
+		if (unlikely(prev->context.ldt != next->context.ldt))
+			load_LDT_nolock(&next->context);
+	}
+#ifdef CONFIG_SMP
+	else {
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+			/* We were in lazy tlb mode and leave_mm disabled
+			 * tlb flush IPI delivery. We must reload CR3
+			 * to make sure to use no freed page tables.
+			 */
+			load_cr3(next->pgd);
+			load_LDT_nolock(&next->context);
+		}
+	}
+#endif
+}

 #define activate_mm(prev, next)			\
 do {						\
@ -33,5 +76,17 @@ do {						\
 	switch_mm((prev), (next), NULL);	\
 } while (0);

+#ifdef CONFIG_X86_32
+#define deactivate_mm(tsk, mm)			\
+do {						\
+	loadsegment(gs, 0);			\
+} while (0)
+#else
+#define deactivate_mm(tsk, mm)			\
+do {						\
+	load_gs_index(0);			\
+	loadsegment(fs, 0);			\
+} while (0)
+#endif
+
 #endif /* _ASM_X86_MMU_CONTEXT_H */
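The now-shared switch_mm()/enter_lazy_tlb() pair implements a small per-CPU state machine: a CPU idling in a borrowed mm goes TLBSTATE_LAZY so flush IPIs can skip it, and it must reload its page tables when it re-attaches after a flush was suppressed. A toy model of just that state logic; the enum values match tlbflush.h, the driver code is purely illustrative:

#include <stdio.h>
#include <stdbool.h>

enum { TLBSTATE_OK = 1, TLBSTATE_LAZY = 2 };

static int tlbstate = TLBSTATE_OK;	/* per-CPU in the real code */
static bool missed_flush_ipi;		/* set by leave_mm() while lazy */

static void enter_lazy_tlb(void)
{
	if (tlbstate == TLBSTATE_OK)
		tlbstate = TLBSTATE_LAZY;
}

/* prev == next path of switch_mm(): leave lazy mode, and reload the
 * page tables only if a flush IPI was suppressed while we were lazy */
static void switch_mm_same(void)
{
	tlbstate = TLBSTATE_OK;
	if (missed_flush_ipi) {
		printf("reload cr3 (TLB may hold freed page tables)\n");
		missed_flush_ipi = false;
	}
}

int main(void)
{
	enter_lazy_tlb();
	missed_flush_ipi = true;	/* simulate leave_mm() on this CPU */
	switch_mm_same();
	return 0;
}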
arch/x86/include/asm/mmu_context_32.h (deleted)
@ -1,55 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_32_H
-#define _ASM_X86_MMU_CONTEXT_32_H
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
-		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-			     struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-	int cpu = smp_processor_id();
-
-	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
-		x86_write_percpu(cpu_tlbstate.active_mm, next);
-#endif
-		cpu_set(cpu, next->cpu_vm_mask);
-
-		/* Re-load page tables */
-		load_cr3(next->pgd);
-
-		/*
-		 * load the LDT, if the LDT is different:
-		 */
-		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context);
-	}
-#ifdef CONFIG_SMP
-	else {
-		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
-		BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
-
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-			/* We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must reload %cr3.
-			 */
-			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context);
-		}
-	}
-#endif
-}
-
-#define deactivate_mm(tsk, mm)			\
-	asm("movl %0,%%gs": :"r" (0));
-
-#endif /* _ASM_X86_MMU_CONTEXT_32_H */
arch/x86/include/asm/mmu_context_64.h (deleted)
@ -1,54 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_64_H
-#define _ASM_X86_MMU_CONTEXT_64_H
-
-#include <asm/pda.h>
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-	if (read_pda(mmu_state) == TLBSTATE_OK)
-		write_pda(mmu_state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-	unsigned cpu = smp_processor_id();
-	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-		write_pda(mmu_state, TLBSTATE_OK);
-		write_pda(active_mm, next);
-#endif
-		cpu_set(cpu, next->cpu_vm_mask);
-		load_cr3(next->pgd);
-
-		if (unlikely(next->context.ldt != prev->context.ldt))
-			load_LDT_nolock(&next->context);
-	}
-#ifdef CONFIG_SMP
-	else {
-		write_pda(mmu_state, TLBSTATE_OK);
-		if (read_pda(active_mm) != next)
-			BUG();
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-			/* We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must reload CR3
-			 * to make sure to use no freed page tables.
-			 */
-			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context);
-		}
-	}
-#endif
-}
-
-#define deactivate_mm(tsk, mm)			\
-do {						\
-	load_gs_index(0);			\
-	asm volatile("movl %0,%%fs"::"r"(0));	\
-} while (0)
-
-#endif /* _ASM_X86_MMU_CONTEXT_64_H */
arch/x86/include/asm/mpspec_def.h
@ -24,17 +24,18 @@
 # endif
 #endif

-struct intel_mp_floating {
-	char mpf_signature[4];		/* "_MP_"			*/
-	unsigned int mpf_physptr;	/* Configuration table address	*/
-	unsigned char mpf_length;	/* Our length (paragraphs)	*/
-	unsigned char mpf_specification;/* Specification version	*/
-	unsigned char mpf_checksum;	/* Checksum (makes sum 0)	*/
-	unsigned char mpf_feature1;	/* Standard or configuration ?	*/
-	unsigned char mpf_feature2;	/* Bit7 set for IMCR|PIC	*/
-	unsigned char mpf_feature3;	/* Unused (0)			*/
-	unsigned char mpf_feature4;	/* Unused (0)			*/
-	unsigned char mpf_feature5;	/* Unused (0)			*/
+/* Intel MP Floating Pointer Structure */
+struct mpf_intel {
+	char signature[4];		/* "_MP_"			*/
+	unsigned int physptr;		/* Configuration table address	*/
+	unsigned char length;		/* Our length (paragraphs)	*/
+	unsigned char specification;	/* Specification version	*/
+	unsigned char checksum;		/* Checksum (makes sum 0)	*/
+	unsigned char feature1;		/* Standard or configuration ?	*/
+	unsigned char feature2;		/* Bit7 set for IMCR|PIC	*/
+	unsigned char feature3;		/* Unused (0)			*/
+	unsigned char feature4;		/* Unused (0)			*/
+	unsigned char feature5;		/* Unused (0)			*/
 };

 #define MPC_SIGNATURE "PCMP"
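The renamed struct mpf_intel keeps the classic MP-spec invariant its checksum comment states: the checksum byte is chosen so that all bytes of the floating pointer structure sum to zero mod 256. A small sketch of that validation, assuming a 16-byte structure as the kernel scans for:

#include <stddef.h>
#include <stdio.h>

/* sum every byte; a valid _MP_ floating pointer yields 0 */
static int mpf_checksum(const unsigned char *p, size_t len)
{
	unsigned char sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

int main(void)
{
	unsigned char mpf[16] = { '_', 'M', 'P', '_' };
	unsigned char sum = mpf_checksum(mpf, sizeof(mpf));

	mpf[10] = (unsigned char)-sum;	/* checksum byte makes the sum 0 */
	printf("checksum ok: %d\n", mpf_checksum(mpf, sizeof(mpf)) == 0);
	return 0;
}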
arch/x86/include/asm/page_64.h
@ -13,8 +13,8 @@
 #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
 #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)

-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+#define IRQ_STACK_ORDER 2
+#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)

 #define STACKFAULT_STACK 1
 #define DOUBLEFAULT_STACK 2
arch/x86/include/asm/paravirt.h
@ -244,7 +244,8 @@ struct pv_mmu_ops {
 	void (*flush_tlb_user)(void);
 	void (*flush_tlb_kernel)(void);
 	void (*flush_tlb_single)(unsigned long addr);
-	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+	void (*flush_tlb_others)(const struct cpumask *cpus,
+				 struct mm_struct *mm,
 				 unsigned long va);

 	/* Hooks for allocating and freeing a pagetable top-level */
@ -984,10 +985,11 @@ static inline void __flush_tlb_single(unsigned long addr)
 	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
 }

-static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+				    struct mm_struct *mm,
 				    unsigned long va)
 {
-	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
+	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
 }

 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
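Passing flush_tlb_others() a const struct cpumask * instead of a cpumask_t by value matters once NR_CPUS grows: the by-value copy is NR_CPUS bits placed on the stack for every call. A quick sizing sketch; NR_CPUS=4096 is an assumed large config, not from this diff:

#include <stdio.h>

#define NR_CPUS		4096	/* assumed large config */
#define BITS_PER_LONG	(8 * sizeof(long))

struct cpumask {
	unsigned long bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
};

int main(void)
{
	/* by value: this many bytes copied per call; by pointer: 8 */
	printf("sizeof(struct cpumask) = %zu bytes\n", sizeof(struct cpumask));
	return 0;
}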
arch/x86/include/asm/pda.h (deleted)
@ -1,137 +0,0 @@
-#ifndef _ASM_X86_PDA_H
-#define _ASM_X86_PDA_H
-
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <asm/page.h>
-
-/* Per processor datastructure. %gs points to it while the kernel runs */
-struct x8664_pda {
-	struct task_struct *pcurrent;	/* 0  Current process */
-	unsigned long data_offset;	/* 8 Per cpu data offset from linker
-					   address */
-	unsigned long kernelstack;	/* 16 top of kernel stack for current */
-	unsigned long oldrsp;		/* 24 user rsp for system call */
-	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
-	unsigned int cpunumber;		/* 36 Logical CPU number */
-#ifdef CONFIG_CC_STACKPROTECTOR
-	unsigned long stack_canary;	/* 40 stack canary value */
-					/* gcc-ABI: this canary MUST be at
-					   offset 40!!! */
-#endif
-	char *irqstackptr;
-	short nodenumber;		/* number of current node (32k max) */
-	short in_bootmem;		/* pda lives in bootmem */
-	unsigned int __softirq_pending;
-	unsigned int __nmi_count;	/* number of NMI on this CPUs */
-	short mmu_state;
-	short isidle;
-	struct mm_struct *active_mm;
-	unsigned apic_timer_irqs;
-	unsigned irq0_irqs;
-	unsigned irq_resched_count;
-	unsigned irq_call_count;
-	unsigned irq_tlb_count;
-	unsigned irq_thermal_count;
-	unsigned irq_threshold_count;
-	unsigned irq_spurious_count;
-} ____cacheline_aligned_in_smp;
-
-extern struct x8664_pda **_cpu_pda;
-extern void pda_init(int);
-
-#define cpu_pda(i) (_cpu_pda[i])
-
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
- */
-extern void __bad_pda_field(void) __attribute__((noreturn));
-
-/*
- * proxy_pda doesn't actually exist, but tell gcc it is accessed for
- * all PDA accesses so it gets read/write dependencies right.
- */
-extern struct x8664_pda _proxy_pda;
-
-#define pda_offset(field) offsetof(struct x8664_pda, field)
-
-#define pda_to_op(op, field, val)					\
-do {									\
-	typedef typeof(_proxy_pda.field) T__;				\
-	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
-	switch (sizeof(_proxy_pda.field)) {				\
-	case 2:								\
-		asm(op "w %1,%%gs:%c2" :				\
-		    "+m" (_proxy_pda.field) :				\
-		    "ri" ((T__)val),					\
-		    "i"(pda_offset(field)));				\
-		break;							\
-	case 4:								\
-		asm(op "l %1,%%gs:%c2" :				\
-		    "+m" (_proxy_pda.field) :				\
-		    "ri" ((T__)val),					\
-		    "i" (pda_offset(field)));				\
-		break;							\
-	case 8:								\
-		asm(op "q %1,%%gs:%c2":					\
-		    "+m" (_proxy_pda.field) :				\
-		    "ri" ((T__)val),					\
-		    "i"(pda_offset(field)));				\
-		break;							\
-	default:							\
-		__bad_pda_field();					\
-	}								\
-} while (0)
-
-#define pda_from_op(op, field)			\
-({						\
-	typeof(_proxy_pda.field) ret__;		\
-	switch (sizeof(_proxy_pda.field)) {	\
-	case 2:					\
-		asm(op "w %%gs:%c1,%0" :	\
-		    "=r" (ret__) :		\
-		    "i" (pda_offset(field)),	\
-		    "m" (_proxy_pda.field));	\
-		break;				\
-	case 4:					\
-		asm(op "l %%gs:%c1,%0":		\
-		    "=r" (ret__):		\
-		    "i" (pda_offset(field)),	\
-		    "m" (_proxy_pda.field));	\
-		break;				\
-	case 8:					\
-		asm(op "q %%gs:%c1,%0":		\
-		    "=r" (ret__) :		\
-		    "i" (pda_offset(field)),	\
-		    "m" (_proxy_pda.field));	\
-		break;				\
-	default:				\
-		__bad_pda_field();		\
-	}					\
-	ret__;					\
-})
-
-#define read_pda(field)		pda_from_op("mov", field)
-#define write_pda(field, val)	pda_to_op("mov", field, val)
-#define add_pda(field, val)	pda_to_op("add", field, val)
-#define sub_pda(field, val)	pda_to_op("sub", field, val)
-#define or_pda(field, val)	pda_to_op("or", field, val)
-
-/* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define test_and_clear_bit_pda(bit, field)				\
-({									\
-	int old__;							\
-	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
-		     : "=r" (old__), "+m" (_proxy_pda.field)		\
-		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
-	old__;								\
-})
-
-#endif
-
-#define PDA_STACKOFFSET (5*8)
-
-#endif /* _ASM_X86_PDA_H */
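One idiom from the deleted pda.h is worth noting before it disappears: pda_to_op() type-checked val against the field with a dead if (0) assignment, which emits no instructions but lets gcc flag type mismatches; percpu_to_op() keeps the same trick. A standalone sketch with hypothetical names:

/* compiles to nothing, but warns if val cannot assign to lhs's type */
#define check_assign_type(lhs, val)				\
do {								\
	typedef __typeof__(lhs) T__;				\
	if (0) { T__ tmp__; tmp__ = (val); (void)tmp__; }	\
} while (0)

int main(void)
{
	unsigned long counter = 0;

	check_assign_type(counter, 1UL);	/* fine */
	/* check_assign_type(counter, (void *)0); would warn */
	return (int)counter;
}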
arch/x86/include/asm/percpu.h
@ -2,53 +2,12 @@
 #define _ASM_X86_PERCPU_H

 #ifdef CONFIG_X86_64
-#include <linux/compiler.h>
-
-/* Same as asm-generic/percpu.h, except that we store the per cpu offset
-   in the PDA. Longer term the PDA and every per cpu variable
-   should be just put into a single section and referenced directly
-   from %gs */
-
-#ifdef CONFIG_SMP
-#include <asm/pda.h>
-
-#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
-#define __my_cpu_offset read_pda(data_offset)
-
-#define per_cpu_offset(x) (__per_cpu_offset(x))
-
+#define __percpu_seg		gs
+#define __percpu_mov_op		movq
+#else
+#define __percpu_seg		fs
+#define __percpu_mov_op		movl
 #endif
-#include <asm-generic/percpu.h>
-
-DECLARE_PER_CPU(struct x8664_pda, pda);
-
-/*
- * These are supposed to be implemented as a single instruction which
- * operates on the per-cpu data base segment.  x86-64 doesn't have
- * that yet, so this is a fairly inefficient workaround for the
- * meantime.  The single instruction is atomic with respect to
- * preemption and interrupts, so we need to explicitly disable
- * interrupts here to achieve the same effect.  However, because it
- * can be used from within interrupt-disable/enable, we can't actually
- * disable interrupts; disabling preemption is enough.
- */
-#define x86_read_percpu(var)						\
-	({								\
-		typeof(per_cpu_var(var)) __tmp;				\
-		preempt_disable();					\
-		__tmp = __get_cpu_var(var);				\
-		preempt_enable();					\
-		__tmp;							\
-	})
-
-#define x86_write_percpu(var, val)					\
-	do {								\
-		preempt_disable();					\
-		__get_cpu_var(var) = (val);				\
-		preempt_enable();					\
-	} while(0)
-
-#else /* CONFIG_X86_64 */

 #ifdef __ASSEMBLY__
@ -66,46 +25,25 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
  */
 #ifdef CONFIG_SMP
 #define PER_CPU(var, reg)						\
-	movl %fs:per_cpu__##this_cpu_off, reg;		\
+	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;	\
 	lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var)	%fs:per_cpu__##var
+#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
 #else /* ! SMP */
 #define PER_CPU(var, reg)						\
-	movl $per_cpu__##var, reg
+	__percpu_mov_op $per_cpu__##var, reg
 #define PER_CPU_VAR(var)	per_cpu__##var
 #endif	/* SMP */

 #else /* ...!ASSEMBLY */

-/*
- * PER_CPU finds an address of a per-cpu variable.
- *
- * Args:
- *    var - variable name
- *    cpu - 32bit register containing the current CPU number
- *
- * The resulting address is stored in the "cpu" argument.
- *
- * Example:
- *    PER_CPU(cpu_gdt_descr, %ebx)
- */
+#include <linux/stringify.h>
+
 #ifdef CONFIG_SMP
-#define __my_cpu_offset x86_read_percpu(this_cpu_off)
-
-/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
-#define __percpu_seg "%%fs:"
-
-#else  /* !SMP */
-
-#define __percpu_seg ""
-
-#endif	/* SMP */
-
-#include <asm-generic/percpu.h>
-
-/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
+#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
+#define __my_cpu_offset		percpu_read(this_cpu_off)
+#else
+#define __percpu_arg(x)		"%" #x
+#endif

 /* For arch-specific code, we can use direct single-insn ops (they
  * don't give an lvalue though). */
@ -120,20 +58,25 @@ do {							\
 	}						\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b %1,"__percpu_seg"%0"		\
+		asm(op "b %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
 		    : "ri" ((T__)val));			\
 		break;					\
 	case 2:						\
-		asm(op "w %1,"__percpu_seg"%0"		\
+		asm(op "w %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
 		    : "ri" ((T__)val));			\
 		break;					\
 	case 4:						\
-		asm(op "l %1,"__percpu_seg"%0"		\
+		asm(op "l %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
 		    : "ri" ((T__)val));			\
 		break;					\
+	case 8:						\
+		asm(op "q %1,"__percpu_arg(0)		\
+		    : "+m" (var)			\
+		    : "re" ((T__)val));			\
+		break;					\
 	default: __bad_percpu_size();			\
 	}						\
 } while (0)
@ -143,17 +86,22 @@ do {							\
 	typeof(var) ret__;				\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b "__percpu_seg"%1,%0"		\
+		asm(op "b "__percpu_arg(1)",%0"		\
 		    : "=r" (ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 2:						\
-		asm(op "w "__percpu_seg"%1,%0"		\
+		asm(op "w "__percpu_arg(1)",%0"		\
 		    : "=r" (ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 4:						\
-		asm(op "l "__percpu_seg"%1,%0"		\
+		asm(op "l "__percpu_arg(1)",%0"		\
+		    : "=r" (ret__)			\
+		    : "m" (var));			\
+		break;					\
+	case 8:						\
+		asm(op "q "__percpu_arg(1)",%0"		\
 		    : "=r" (ret__)			\
 		    : "m" (var));			\
 		break;					\
@ -162,13 +110,30 @@ do {							\
 	ret__;						\
 })

-#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var)
+#define percpu_write(var, val)	percpu_to_op("mov", per_cpu__##var, val)
+#define percpu_add(var, val)	percpu_to_op("add", per_cpu__##var, val)
+#define percpu_sub(var, val)	percpu_to_op("sub", per_cpu__##var, val)
+#define percpu_and(var, val)	percpu_to_op("and", per_cpu__##var, val)
+#define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
+#define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
+
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var)				\
+({									\
+	int old__;							\
+	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
+		     : "=r" (old__), "+m" (per_cpu__##var)		\
+		     : "dIr" (bit));					\
+	old__;								\
+})
+
+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU(unsigned long, this_cpu_off);

 #endif /* !__ASSEMBLY__ */
-#endif /* !CONFIG_X86_64 */

 #ifdef CONFIG_SMP
@ -195,9 +160,9 @@ do {							\
 #define	early_per_cpu_ptr(_name) (_name##_early_ptr)
 #define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
 #define	early_per_cpu(_name, _cpu) 				\
-	(early_per_cpu_ptr(_name) ?				\
-		early_per_cpu_ptr(_name)[_cpu] :		\
-		per_cpu(_name, _cpu))
+	*(early_per_cpu_ptr(_name) ?				\
+		&early_per_cpu_ptr(_name)[_cpu] :		\
+		&per_cpu(_name, _cpu))

 #else	/* !CONFIG_SMP */
 #define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
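percpu_to_op()/percpu_from_op() pick the mov width by switch (sizeof(var)), and the new __percpu_arg() splices in the %gs or %fs prefix so one macro body serves both 32- and 64-bit. A userspace analogue of the width dispatch on plain memory; no segment prefix here, and an x86-64 gcc is assumed:

#include <stdio.h>

/* same dispatch shape as percpu_to_op(), minus the per-cpu segment */
#define mem_to_op(op, var, val)				\
do {							\
	typedef __typeof__(var) T__;			\
	switch (sizeof(var)) {				\
	case 4:						\
		asm(op "l %1,%0"			\
		    : "+m" (var)			\
		    : "ri" ((T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,%0"			\
		    : "+m" (var)			\
		    : "re" ((T__)(val)));		\
		break;					\
	}						\
} while (0)

int main(void)
{
	unsigned int v32 = 1;
	unsigned long v64 = 0;

	mem_to_op("add", v32, 41);	/* emits addl */
	mem_to_op("mov", v64, 40);	/* emits movq */
	printf("%u %lu\n", v32, v64);	/* 42 40 */
	return 0;
}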
arch/x86/include/asm/pgtable_64.h
@ -11,7 +11,6 @@
 #include <asm/processor.h>
 #include <linux/bitops.h>
 #include <linux/threads.h>
-#include <asm/pda.h>

 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
arch/x86/include/asm/processor.h
@ -378,6 +378,22 @@ union thread_xstate {

 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
+
+union irq_stack_union {
+	char irq_stack[IRQ_STACK_SIZE];
+	/*
+	 * GCC hardcodes the stack canary as %gs:40.  Since the
+	 * irq_stack is the object at %gs:0, we reserve the bottom
+	 * 48 bytes of the irq stack for the canary.
+	 */
+	struct {
+		char gs_base[40];
+		unsigned long stack_canary;
+	};
+};
+
+DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU(char *, irq_stack_ptr);
 #endif

 extern void print_cpu_info(struct cpuinfo_x86 *);
@ -754,7 +770,6 @@ extern struct desc_ptr		early_gdt_descr;
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
-extern void init_gdt(int cpu);

 static inline unsigned long get_debugctlmsr(void)
 {
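irq_stack_union overlays the irq stack with a struct that places stack_canary at byte 40, because gcc's stackprotector ABI hardcodes the canary load as %gs:40. That layout property is checkable at compile time; a sketch, with IRQ_STACK_SIZE assumed from IRQ_STACK_ORDER 2 and 4K pages:

#include <stddef.h>
#include <stdio.h>

#define IRQ_STACK_SIZE	(4096 << 2)	/* PAGE_SIZE << IRQ_STACK_ORDER */

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

/* gcc emits %gs:40 loads for the canary; the layout must honor that */
_Static_assert(offsetof(union irq_stack_union, stack_canary) == 40,
	       "canary must live at %gs:40");

int main(void)
{
	printf("canary offset = %zu\n",
	       offsetof(union irq_stack_union, stack_canary));
	return 0;
}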
arch/x86/include/asm/setup.h
@ -100,7 +100,6 @@ extern unsigned long init_pg_tables_start;
 extern unsigned long init_pg_tables_end;

 #else
-void __init x86_64_init_pda(void);
 void __init x86_64_start_kernel(char *real_mode);
 void __init x86_64_start_reservations(char *real_mode_data);

arch/x86/include/asm/smp.h
@ -15,34 +15,8 @@
 #  include <asm/io_apic.h>
 # endif
 #endif
-#include <asm/pda.h>
 #include <asm/thread_info.h>
+#include <asm/cpumask.h>
-
-#ifdef CONFIG_X86_64
-
-extern cpumask_var_t cpu_callin_mask;
-extern cpumask_var_t cpu_callout_mask;
-extern cpumask_var_t cpu_initialized_mask;
-extern cpumask_var_t cpu_sibling_setup_mask;
-
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
-
-#endif /* CONFIG_X86_32 */
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
-
-extern int __cpuinit get_local_pda(int cpu);

 extern int smp_num_siblings;
 extern unsigned int num_processors;
@ -50,9 +24,7 @@ extern unsigned int num_processors;
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
-#ifdef CONFIG_X86_32
 DECLARE_PER_CPU(int, cpu_number);
-#endif

 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
@ -167,8 +139,6 @@ void play_dead_common(void);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);

-extern void prefill_possible_map(void);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
@ -177,10 +147,6 @@ static inline int num_booting_cpus(void)
 {
 	return cpumask_weight(cpu_callout_mask);
 }
-#else
-static inline void prefill_possible_map(void)
-{
-}
 #endif /* CONFIG_SMP */

 extern unsigned disabled_cpus __cpuinitdata;
@ -191,11 +157,11 @@ extern unsigned disabled_cpus __cpuinitdata;
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+#define raw_smp_processor_id() (percpu_read(cpu_number))
 extern int safe_smp_processor_id(void);

 #elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id()	read_pda(cpunumber)
+#define raw_smp_processor_id() (percpu_read(cpu_number))

 #define stack_smp_processor_id()					\
 ({								\
@ -205,10 +171,6 @@ extern int safe_smp_processor_id(void);
 })
 #define safe_smp_processor_id()		smp_processor_id()

-#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
-#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
-#define safe_smp_processor_id()		0
-#define stack_smp_processor_id() 	0
 #endif

 #ifdef CONFIG_X86_LOCAL_APIC
@ -251,11 +213,5 @@ static inline int hard_smp_processor_id(void)

 #endif /* CONFIG_X86_LOCAL_APIC */

-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id	0
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
| 
						 | 
					
 | 
				
			||||||
							
								
								
									

arch/x86/include/asm/stackprotector.h (new file, 38 lines)

@@ -0,0 +1,38 @@
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <asm/tsc.h>
+#include <asm/processor.h>
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+	u64 canary;
+	u64 tsc;
+
+	/*
+	 * Build time only check to make sure the stack_canary is at
+	 * offset 40 in the pda; this is a gcc ABI requirement
+	 */
+	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+
+	/*
+	 * We both use the random pool and the current TSC as a source
+	 * of randomness. The TSC only matters for very early init,
+	 * there it already has some randomness on most systems. Later
+	 * on during the bootup the random pool has true entropy too.
+	 */
+	get_random_bytes(&canary, sizeof(canary));
+	tsc = __native_read_tsc();
+	canary += tsc + (tsc << 32UL);
+
+	current->stack_canary = canary;
+	percpu_write(irq_stack_union.stack_canary, canary);
+}
+
+#endif
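
Review note: the canary above mixes get_random_bytes() output with the TSC so that even very early boot, when the random pool is still cold, gets a value that differs across boots. A user-space sketch of just the mixing step (illustrative names, not kernel API):

	#include <stdint.h>

	/* Illustrative only: fold a timestamp into both 32-bit halves of a
	 * 64-bit canary, mirroring "canary += tsc + (tsc << 32)" above. */
	static uint64_t mix_canary(uint64_t random_bits, uint64_t tsc)
	{
		return random_bits + tsc + (tsc << 32);
	}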

arch/x86/include/asm/system.h

@@ -86,6 +86,20 @@ do {									\
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
 	  "r12", "r13", "r14", "r15"
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary							  \
+	"movq %P[task_canary](%%rsi),%%r8\n\t"				  \
+	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define __switch_canary_oparam						  \
+	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+#define __switch_canary_iparam						  \
+	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else	/* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif	/* CC_STACKPROTECTOR */
+
 /* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT					  \

@@ -94,19 +108,22 @@ do {									\
 	     "call __switch_to\n\t"					  \
 	     ".globl thread_return\n"					  \
 	     "thread_return:\n\t"					  \
-	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
+	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
+	     __switch_canary						  \
 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
 	     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
 	     "movq %%rax,%%rdi\n\t" 					  \
 	     "jc   ret_from_fork\n\t"					  \
 	     RESTORE_CONTEXT						  \
 	     : "=a" (last)					  	  \
+	       __switch_canary_oparam					  \
 	     : [next] "S" (next), [prev] "D" (prev),			  \
 	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
 	       [tif_fork] "i" (TIF_FORK),			  	  \
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
-	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
+	       [current_task] "m" (per_cpu_var(current_task))		  \
+	       __switch_canary_iparam					  \
 	     : "memory", "cc" __EXTRA_CLOBBER)
 #endif
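
Review note: __switch_canary runs inside switch_to() so the incoming task's canary is visible at %gs:40 before any of its stack-protected frames return. A plain-C model of the handoff (names here are illustrative, not kernel API):

	#define NR_CPUS 64			/* illustrative */

	struct task { unsigned long stack_canary; };

	/* stands in for the per-CPU irq_stack_union.stack_canary slot */
	static unsigned long percpu_stack_canary[NR_CPUS];

	/* Sketch: publish the next task's canary in per-CPU storage as part
	 * of the context switch, before the next task starts running. */
	static void publish_canary(const struct task *next, int cpu)
	{
		percpu_stack_canary[cpu] = next->stack_canary;
	}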

arch/x86/include/asm/thread_info.h

@@ -194,25 +194,21 @@ static inline struct thread_info *current_thread_info(void)
 
 #else /* X86_32 */
 
-#include <asm/pda.h>
+#include <asm/percpu.h>
+#define KERNEL_STACK_OFFSET (5*8)
 
 /*
  * macros/functions for gaining access to the thread information structure
  * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
 #ifndef __ASSEMBLY__
+DECLARE_PER_CPU(unsigned long, kernel_stack);
+
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
-	return ti;
-}
-
-/* do not use in interrupt context */
-static inline struct thread_info *stack_thread_info(void)
-{
-	struct thread_info *ti;
-	asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+	ti = (void *)(percpu_read(kernel_stack) +
+		      KERNEL_STACK_OFFSET - THREAD_SIZE);
 	return ti;
 }

@@ -220,8 +216,8 @@ static inline struct thread_info *stack_thread_info(void)
 
 /* how to get the thread information struct from ASM */
 #define GET_THREAD_INFO(reg) \
-	movq %gs:pda_kernelstack,reg ; \
-	subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
+	movq PER_CPU_VAR(kernel_stack),reg ; \
+	subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
 
 #endif
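
Review note: with the PDA gone, current_thread_info() is pure per-CPU arithmetic. A self-contained model of that arithmetic, with the per-CPU variable reduced to a plain global and assumed constants:

	#include <stdint.h>

	#define THREAD_SIZE		(4 * 4096)	/* assumed stack size */
	#define KERNEL_STACK_OFFSET	(5 * 8)

	struct thread_info;

	/* stands in for the per-CPU kernel_stack variable */
	static uintptr_t kernel_stack;

	/* thread_info sits at the lowest address of the stack area, and
	 * kernel_stack points KERNEL_STACK_OFFSET below the stack top. */
	static struct thread_info *current_thread_info_model(void)
	{
		return (struct thread_info *)(kernel_stack +
					      KERNEL_STACK_OFFSET - THREAD_SIZE);
	}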

arch/x86/include/asm/tlbflush.h

@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb();
 }
 
-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
 					   unsigned long va)
 {

@@ -142,31 +142,28 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	flush_tlb_mm(vma->vm_mm);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
-			     unsigned long va);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     struct mm_struct *mm, unsigned long va);
 
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
-#ifdef CONFIG_X86_32
 struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
-	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
 
-void reset_lazy_tlbstate(void);
-#else
 static inline void reset_lazy_tlbstate(void)
 {
+	percpu_write(cpu_tlbstate.state, 0);
+	percpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
-#endif
 
 #endif	/* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,

@@ -175,4 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }
 
+extern void zap_low_mappings(void);
+
 #endif /* _ASM_X86_TLBFLUSH_H */
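
Review note: the cpumask_t to const struct cpumask * conversion is about stack footprint as much as typing; a by-value cpumask is NR_CPUS bits copied on every call. Illustration (types and sizes are illustrative only):

	#define NR_CPUS 4096	/* a large illustrative config */

	typedef struct {
		unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
	} cpumask_t;	/* 512 bytes at NR_CPUS=4096 on LP64 */

	/* by value: copies the whole mask onto the stack per call */
	void flush_by_value(cpumask_t mask);

	/* by pointer: 8 bytes, and the mask itself may live off-stack */
	void flush_by_pointer(const cpumask_t *mask);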

arch/x86/include/asm/topology.h

@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
 	return &node_to_cpumask_map[node];
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */

@@ -83,7 +85,8 @@ extern cpumask_t *node_to_cpumask_map;
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
 /* Returns the number of the current Node. */
-#define numa_node_id()		read_pda(nodenumber)
+DECLARE_PER_CPU(int, node_number);
+#define numa_node_id()		percpu_read(node_number)
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);

@@ -102,10 +105,7 @@ static inline int cpu_to_node(int cpu)
 /* Same function but used if called before per_cpu areas are setup */
 static inline int early_cpu_to_node(int cpu)
 {
-	if (early_per_cpu_ptr(x86_cpu_to_node_map))
-		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-	return per_cpu(x86_cpu_to_node_map, cpu);
+	return early_per_cpu(x86_cpu_to_node_map, cpu);
 }
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */

@@ -122,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
+extern void setup_node_to_cpumask_map(void);
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"

@@ -192,9 +194,20 @@ extern int __node_distance(int, int);
 
 #else /* !CONFIG_NUMA */
 
-#define numa_node_id()		0
-#define	cpu_to_node(cpu)	0
-#define	early_cpu_to_node(cpu)	0
+static inline int numa_node_id(void)
+{
+	return 0;
+}
+
+static inline int cpu_to_node(int cpu)
+{
+	return 0;
+}
+
+static inline int early_cpu_to_node(int cpu)
+{
+	return 0;
+}
 
 static inline const cpumask_t *cpumask_of_node(int node)
 {

@@ -209,6 +222,8 @@ static inline int node_to_first_cpu(int node)
 	return first_cpu(cpu_online_map);
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
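
Review note: turning the !NUMA `#define ... 0` stubs into static inlines keeps argument type checking in configurations that never exercise the real versions. A minimal illustration (not kernel code):

	#define cpu_to_node_macro(cpu)	0

	static inline int cpu_to_node_inline(int cpu)
	{
		return 0;
	}

	static int example(void)
	{
		/* the macro ignores its argument entirely, so even a string
		 * slips through; the inline still type-checks it */
		return cpu_to_node_macro("oops") + cpu_to_node_inline(3);
	}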

arch/x86/include/asm/trampoline.h

@@ -13,6 +13,7 @@ extern unsigned char *trampoline_base;
 
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 #define TRAMPOLINE_BASE 0x6000

arch/x86/include/asm/uv/uv.h (new file, 33 lines)

@@ -0,0 +1,33 @@
+#ifndef _ASM_X86_UV_UV_H
+#define _ASM_X86_UV_UV_H
+
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+
+#ifdef CONFIG_X86_UV
+
+extern enum uv_system_type get_uv_system_type(void);
+extern int is_uv_system(void);
+extern void uv_cpu_init(void);
+extern void uv_system_init(void);
+extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
+extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+						 struct mm_struct *mm,
+						 unsigned long va,
+						 unsigned int cpu);
+
+#else	/* X86_UV */
+
+static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline int is_uv_system(void)	{ return 0; }
+static inline void uv_cpu_init(void)	{ }
+static inline void uv_system_init(void)	{ }
+static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
+{ return 1; }
+static inline const struct cpumask *
+uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
+		    unsigned long va, unsigned int cpu)
+{ return cpumask; }
+
+#endif	/* X86_UV */
+
+#endif	/* _ASM_X86_UV_UV_H */

arch/x86/include/asm/uv/uv_bau.h

@@ -325,7 +325,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
 #define cpubit_isset(cpu, bau_local_cpumask) \
 	test_bit((cpu), (bau_local_cpumask).bits)
 
-extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
 extern void uv_bau_message_intr1(void);
 extern void uv_bau_timeout_intr1(void);

arch/x86/kernel/Makefile

@@ -23,11 +23,12 @@ nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_vsyscall_64.o	:= $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o		:= $(nostackp)
 CFLAGS_tsc.o		:= $(nostackp)
+CFLAGS_paravirt.o	:= $(nostackp)
 
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y			+= setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
+obj-y			+= setup.o i8259.o irqinit_$(BITS).o
 obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
 obj-$(CONFIG_X86_32)	+= probe_roms_32.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o

@@ -57,9 +58,9 @@ obj-$(CONFIG_PCI)		+= early-quirks.o
 apm-y				:= apm_32.o
 obj-$(CONFIG_APM)		+= apm.o
 obj-$(CONFIG_X86_SMP)		+= smp.o
-obj-$(CONFIG_X86_SMP)		+= smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o
-obj-$(CONFIG_X86_32_SMP)	+= smpcommon.o
-obj-$(CONFIG_X86_64_SMP)	+= tsc_sync.o smpcommon.o
+obj-$(CONFIG_X86_SMP)		+= smpboot.o tsc_sync.o ipi.o
+obj-$(CONFIG_SMP)		+= setup_percpu.o
+obj-$(CONFIG_X86_64_SMP)	+= tsc_sync.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o

@@ -114,10 +115,11 @@ obj-$(CONFIG_SWIOTLB)			+= pci-swiotlb_64.o # NB rename without _64
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-        obj-y				+= genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
-	obj-y				+= bios_uv.o uv_irq.o uv_sysfs.o
+        obj-y				+= genapic_64.o genapic_flat_64.o
         obj-y				+= genx2apic_cluster.o
         obj-y				+= genx2apic_phys.o
+	obj-$(CONFIG_X86_UV)		+= genx2apic_uv_x.o tlb_uv.o
+	obj-$(CONFIG_X86_UV)		+= bios_uv.o uv_irq.o uv_sysfs.o
         obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
         obj-$(CONFIG_AUDIT)		+= audit_64.o

arch/x86/kernel/acpi/boot.c

@@ -912,8 +912,8 @@ static u8 __init uniq_ioapic_id(u8 id)
 	DECLARE_BITMAP(used, 256);
 	bitmap_zero(used, 256);
 	for (i = 0; i < nr_ioapics; i++) {
-		struct mp_config_ioapic *ia = &mp_ioapics[i];
-		__set_bit(ia->mp_apicid, used);
+		struct mpc_ioapic *ia = &mp_ioapics[i];
+		__set_bit(ia->apicid, used);
 	}
 	if (!test_bit(id, used))
 		return id;

@@ -945,47 +945,47 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 
 	idx = nr_ioapics;
 
-	mp_ioapics[idx].mp_type = MP_IOAPIC;
-	mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
-	mp_ioapics[idx].mp_apicaddr = address;
+	mp_ioapics[idx].type = MP_IOAPIC;
+	mp_ioapics[idx].flags = MPC_APIC_USABLE;
+	mp_ioapics[idx].apicaddr = address;
 
 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-	mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
+	mp_ioapics[idx].apicid = uniq_ioapic_id(id);
 #ifdef CONFIG_X86_32
-	mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
+	mp_ioapics[idx].apicver = io_apic_get_version(idx);
 #else
-	mp_ioapics[idx].mp_apicver = 0;
+	mp_ioapics[idx].apicver = 0;
 #endif
 	/*
 	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
 	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
 	 */
-	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
 	mp_ioapic_routing[idx].gsi_base = gsi_base;
 	mp_ioapic_routing[idx].gsi_end = gsi_base +
 	    io_apic_get_redir_entries(idx);
 
-	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-	       "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
-	       mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
+	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+	       "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+	       mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
 	       mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
 
 	nr_ioapics++;
 }
 
-static void assign_to_mp_irq(struct mp_config_intsrc *m,
-				    struct mp_config_intsrc *mp_irq)
+static void assign_to_mp_irq(struct mpc_intsrc *m,
+				    struct mpc_intsrc *mp_irq)
 {
-	memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
+	memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
-				struct mp_config_intsrc *m)
+static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
+				struct mpc_intsrc *m)
 {
-	return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
+	return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static void save_mp_irq(struct mp_config_intsrc *m)
+static void save_mp_irq(struct mpc_intsrc *m)
 {
 	int i;
 

@@ -1003,7 +1003,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 {
 	int ioapic;
 	int pin;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 
 	/*
 	 * Convert 'gsi' to 'ioapic.pin'.

@@ -1021,13 +1021,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 	if ((bus_irq == 0) && (trigger == 3))
 		trigger = 1;
 
-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (trigger << 2) | polarity;
-	mp_irq.mp_srcbus = MP_ISA_BUS;
-	mp_irq.mp_srcbusirq = bus_irq;	/* IRQ */
-	mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
-	mp_irq.mp_dstirq = pin;	/* INTIN# */
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (trigger << 2) | polarity;
+	mp_irq.srcbus = MP_ISA_BUS;
+	mp_irq.srcbusirq = bus_irq;	/* IRQ */
+	mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
+	mp_irq.dstirq = pin;	/* INTIN# */
 
 	save_mp_irq(&mp_irq);
 }

@@ -1037,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	int i;
 	int ioapic;
 	unsigned int dstapic;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 
 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
 	/*

@@ -1062,7 +1062,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	ioapic = mp_find_ioapic(0);
 	if (ioapic < 0)
 		return;
-	dstapic = mp_ioapics[ioapic].mp_apicid;
+	dstapic = mp_ioapics[ioapic].apicid;
 
 	/*
 	 * Use the default configuration for the IRQs 0-15.  Unless

@@ -1072,16 +1072,14 @@ void __init mp_config_acpi_legacy_irqs(void)
 		int idx;
 
 		for (idx = 0; idx < mp_irq_entries; idx++) {
-			struct mp_config_intsrc *irq = mp_irqs + idx;
+			struct mpc_intsrc *irq = mp_irqs + idx;
 
 			/* Do we already have a mapping for this ISA IRQ? */
-			if (irq->mp_srcbus == MP_ISA_BUS
-			    && irq->mp_srcbusirq == i)
+			if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
 				break;
 
 			/* Do we already have a mapping for this IOAPIC pin */
-			if (irq->mp_dstapic == dstapic &&
-			    irq->mp_dstirq == i)
+			if (irq->dstapic == dstapic && irq->dstirq == i)
 				break;
 		}
 

@@ -1090,13 +1088,13 @@ void __init mp_config_acpi_legacy_irqs(void)
 			continue;	/* IRQ already used */
 		}
 
-		mp_irq.mp_type = MP_INTSRC;
-		mp_irq.mp_irqflag = 0;	/* Conforming */
-		mp_irq.mp_srcbus = MP_ISA_BUS;
-		mp_irq.mp_dstapic = dstapic;
-		mp_irq.mp_irqtype = mp_INT;
-		mp_irq.mp_srcbusirq = i; /* Identity mapped */
-		mp_irq.mp_dstirq = i;
+		mp_irq.type = MP_INTSRC;
+		mp_irq.irqflag = 0;	/* Conforming */
+		mp_irq.srcbus = MP_ISA_BUS;
+		mp_irq.dstapic = dstapic;
+		mp_irq.irqtype = mp_INT;
+		mp_irq.srcbusirq = i; /* Identity mapped */
+		mp_irq.dstirq = i;
 
 		save_mp_irq(&mp_irq);
 	}

@@ -1207,22 +1205,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 			u32 gsi, int triggering, int polarity)
 {
 #ifdef CONFIG_X86_MPPARSE
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 	int ioapic;
 
 	if (!acpi_ioapic)
 		return 0;
 
 	/* print the entry should happen on mptable identically */
-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
 				(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-	mp_irq.mp_srcbus = number;
-	mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+	mp_irq.srcbus = number;
+	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
 	ioapic = mp_find_ioapic(gsi);
-	mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
-	mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+	mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
+	mp_irq.dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
 
 	save_mp_irq(&mp_irq);
 #endif

arch/x86/kernel/acpi/sleep.c

@@ -101,6 +101,7 @@ int acpi_save_state_mem(void)
 	stack_start.sp = temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
+	initial_gs = per_cpu_offset(smp_processor_id());
 #endif
 	initial_code = (unsigned long)wakeup_long64;
 	saved_magic = 0x123456789abcdef0;

arch/x86/kernel/apic.c

@@ -60,6 +60,24 @@
 # error SPURIOUS_APIC_VECTOR definition error
 #endif
 
+unsigned int num_processors;
+unsigned disabled_cpus __cpuinitdata;
+/* Processor that is doing the boot up */
+unsigned int boot_cpu_physical_apicid = -1U;
+EXPORT_SYMBOL(boot_cpu_physical_apicid);
+unsigned int max_physical_apicid;
+
+/* Bitmask of physically existing CPUs */
+physid_mask_t phys_cpu_present_map;
+
+/*
+ * Map cpu index to physical APIC ID
+ */
+DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+
 #ifdef CONFIG_X86_32
 /*
  * Knob to control our willingness to enable the local APIC.

@@ -1130,6 +1148,13 @@ void __cpuinit setup_local_APIC(void)
 	unsigned int value;
 	int i, j;
 
+	if (disable_apic) {
+#ifdef CONFIG_X86_IO_APIC
+		disable_ioapic_setup();
+#endif
+		return;
+	}
+
 #ifdef CONFIG_X86_32
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
 	if (lapic_is_integrated() && esr_disable) {

@@ -1570,11 +1595,11 @@ int apic_version[MAX_APICS];
 
 int __init APIC_init_uniprocessor(void)
 {
-#ifdef CONFIG_X86_64
 	if (disable_apic) {
 		pr_info("Apic disabled\n");
 		return -1;
 	}
+#ifdef CONFIG_X86_64
 	if (!cpu_has_apic) {
 		disable_apic = 1;
 		pr_info("Apic disabled by BIOS\n");

@@ -1877,17 +1902,8 @@ void __cpuinit generic_processor_info(int apicid, int version)
 #endif
 
 #if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
-	/* are we being called early in kernel startup? */
-	if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
-		u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
-		u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
-
-		cpu_to_apicid[cpu] = apicid;
-		bios_cpu_apicid[cpu] = apicid;
-	} else {
-		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
-		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
-	}
+	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+	early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
 #endif
 
 	set_cpu_possible(cpu, true);
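
Review note: the generic_processor_info() simplification works because early_per_cpu() itself picks between the boot-time array and the real per-CPU area, and stays assignable. A self-contained sketch of the lvalue trick (illustrative names, not the kernel's exact macro):

	#include <stdio.h>

	#define MAX_CPUS 4

	static unsigned short early_map[MAX_CPUS];	  /* boot-time static copy */
	static unsigned short *early_map_ptr = early_map; /* NULL once per-CPU areas exist */
	static unsigned short percpu_map[MAX_CPUS];	  /* stands in for per-CPU storage */

	/* both arms are pointers, so dereferencing the conditional yields an
	 * lvalue that can sit on the left of '=' */
	#define early_per_cpu_sketch(cpu) \
		(*(early_map_ptr ? &early_map_ptr[cpu] : &percpu_map[cpu]))

	int main(void)
	{
		early_per_cpu_sketch(1) = 42;	/* hits the early map */
		early_map_ptr = 0;		/* pretend per-CPU areas are set up */
		early_per_cpu_sketch(1) = 43;	/* hits the per-CPU copy */
		printf("%u %u\n", (unsigned)early_map[1], (unsigned)percpu_map[1]);
		return 0;
	}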

arch/x86/kernel/asm-offsets_64.c

@@ -11,7 +11,6 @@
 #include <linux/hardirq.h>
 #include <linux/suspend.h>
 #include <linux/kbuild.h>
-#include <asm/pda.h>
 #include <asm/processor.h>
 #include <asm/segment.h>
 #include <asm/thread_info.h>

@@ -48,16 +47,6 @@ int main(void)
 #endif
 	BLANK();
 #undef ENTRY
-#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
-	ENTRY(kernelstack);
-	ENTRY(oldrsp);
-	ENTRY(pcurrent);
-	ENTRY(irqcount);
-	ENTRY(cpunumber);
-	ENTRY(irqstackptr);
-	ENTRY(data_offset);
-	BLANK();
-#undef ENTRY
 #ifdef CONFIG_PARAVIRT
 	BLANK();
 	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);

arch/x86/kernel/cpu/common.c

@@ -21,14 +21,16 @@
 #include <asm/asm.h>
 #include <asm/numa.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
 #include <mach_apic.h>
 #include <asm/genapic.h>
+#include <asm/uv/uv.h>
 #endif
 
-#include <asm/pda.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/desc.h>

@@ -50,6 +52,15 @@ cpumask_var_t cpu_initialized_mask;
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+	alloc_bootmem_cpumask_var(&cpu_callin_mask);
+	alloc_bootmem_cpumask_var(&cpu_callout_mask);
+	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
 #else /* CONFIG_X86_32 */
 
 cpumask_t cpu_callin_map;
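
Review note: setup_cpu_local_masks() exists because cpumask_var_t changes shape with CONFIG_CPUMASK_OFFSTACK; only the off-stack form needs allocation. A condensed model of the two shapes (sizes illustrative; see linux/cpumask.h for the real definitions):

	struct cpumask { unsigned long bits[4]; };	/* size illustrative */

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;	 /* pointer: allocate before use */
	#else
	typedef struct cpumask cpumask_var_t[1]; /* embedded storage, no allocation */
	#endif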

@@ -62,23 +73,23 @@ cpumask_t cpu_sibling_setup_map;
 
 static struct cpu_dev *this_cpu __cpuinitdata;
 
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
+	/*
+	 * We need valid kernel segments for data and code in long mode too
 	 * IRET will check the segment types  kkeil 2000/10/28
 	 * Also sysret mandates a special GDT layout
+	 *
+	 * The TLS descriptors are currently at a different place compared to i386.
+	 * Hopefully nobody expects them at a fixed place (Wine?)
 	 */
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
 #else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },

@@ -110,9 +121,9 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
 #endif
+} };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 #ifdef CONFIG_X86_32

@@ -247,12 +258,17 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 void switch_to_new_gdt(void)
 {
 	struct desc_ptr gdt_descr;
+	int cpu = smp_processor_id();
 
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
+	/* Reload the per-cpu base */
 #ifdef CONFIG_X86_32
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
 }

@@ -877,54 +893,26 @@ static __init int setup_disablecpuid(char *arg)
 __setup("clearcpuid=", setup_disablecpuid);
 
 #ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+		     irq_stack_union) __aligned(PAGE_SIZE);
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(char *, irq_stack_ptr);	/* will be set during per cpu init */
+#else
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+	per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
+#endif
 
-void __cpuinit pda_init(int cpu)
-{
-	struct x8664_pda *pda = cpu_pda(cpu);
-
-	/* Setup up data that may be needed in __get_free_pages early */
-	loadsegment(fs, 0);
-	loadsegment(gs, 0);
-	/* Memory clobbers used to order PDA accessed */
-	mb();
-	wrmsrl(MSR_GS_BASE, pda);
-	mb();
-
-	pda->cpunumber = cpu;
-	pda->irqcount = -1;
-	pda->kernelstack = (unsigned long)stack_thread_info() -
-				 PDA_STACKOFFSET + THREAD_SIZE;
-	pda->active_mm = &init_mm;
-	pda->mmu_state = 0;
-
-	if (cpu == 0) {
-		/* others are initialized in smpboot.c */
-		pda->pcurrent = &init_task;
-		pda->irqstackptr = boot_cpu_stack;
-		pda->irqstackptr += IRQSTACKSIZE - 64;
-	} else {
-		if (!pda->irqstackptr) {
-			pda->irqstackptr = (char *)
-				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-			if (!pda->irqstackptr)
-				panic("cannot allocate irqstack for cpu %d",
-				      cpu);
-			pda->irqstackptr += IRQSTACKSIZE - 64;
-		}
-
-		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
-			pda->nodenumber = cpu_to_node(cpu);
-	}
-}
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-				  DEBUG_STKSZ] __page_aligned_bss;
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+	__aligned(PAGE_SIZE);
 
 extern asmlinkage void ignore_sysret(void);
| 
						 | 
					@ -982,15 +970,14 @@ void __cpuinit cpu_init(void)
 | 
				
			||||||
	struct tss_struct *t = &per_cpu(init_tss, cpu);
 | 
						struct tss_struct *t = &per_cpu(init_tss, cpu);
 | 
				
			||||||
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
 | 
						struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
 | 
				
			||||||
	unsigned long v;
 | 
						unsigned long v;
 | 
				
			||||||
	char *estacks = NULL;
 | 
					 | 
				
			||||||
	struct task_struct *me;
 | 
						struct task_struct *me;
 | 
				
			||||||
	int i;
 | 
						int i;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* CPU 0 is initialised in head64.c */
 | 
					#ifdef CONFIG_NUMA
 | 
				
			||||||
	if (cpu != 0)
 | 
						if (cpu != 0 && percpu_read(node_number) == 0 &&
 | 
				
			||||||
		pda_init(cpu);
 | 
						    cpu_to_node(cpu) != NUMA_NO_NODE)
 | 
				
			||||||
	else
 | 
							percpu_write(node_number, cpu_to_node(cpu));
 | 
				
			||||||
		estacks = boot_exception_stacks;
 | 
					#endif
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	me = current;
 | 
						me = current;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1007,6 +994,8 @@ void __cpuinit cpu_init(void)
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	switch_to_new_gdt();
 | 
						switch_to_new_gdt();
 | 
				
			||||||
 | 
						loadsegment(fs, 0);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	load_idt((const struct desc_ptr *)&idt_descr);
 | 
						load_idt((const struct desc_ptr *)&idt_descr);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
 | 
						memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
 | 
				
			||||||
| 
						 | 
					@ -1024,18 +1013,13 @@ void __cpuinit cpu_init(void)
 | 
				
			||||||
	 * set up and load the per-CPU TSS
 | 
						 * set up and load the per-CPU TSS
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	if (!orig_ist->ist[0]) {
 | 
						if (!orig_ist->ist[0]) {
 | 
				
			||||||
		static const unsigned int order[N_EXCEPTION_STACKS] = {
 | 
							static const unsigned int sizes[N_EXCEPTION_STACKS] = {
 | 
				
			||||||
		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
 | 
							  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
 | 
				
			||||||
		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
 | 
							  [DEBUG_STACK - 1] = DEBUG_STKSZ
 | 
				
			||||||
		};
 | 
							};
 | 
				
			||||||
 | 
							char *estacks = per_cpu(exception_stacks, cpu);
 | 
				
			||||||
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 | 
							for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 | 
				
			||||||
			if (cpu) {
 | 
								estacks += sizes[v];
 | 
				
			||||||
				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
 | 
					 | 
				
			||||||
				if (!estacks)
 | 
					 | 
				
			||||||
					panic("Cannot allocate exception "
 | 
					 | 
				
			||||||
					      "stack %ld %d\n", v, cpu);
 | 
					 | 
				
			||||||
			}
 | 
					 | 
				
			||||||
			estacks += PAGE_SIZE << order[v];
 | 
					 | 
				
			||||||
			orig_ist->ist[v] = t->x86_tss.ist[v] =
 | 
								orig_ist->ist[v] = t->x86_tss.ist[v] =
 | 
				
			||||||
					(unsigned long)estacks;
 | 
										(unsigned long)estacks;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
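
The common.c hunks above retire the x86-64 PDA: fields such as pda->irqcount, pda->kernelstack and pda->irqstackptr become ordinary per-cpu variables (irq_count, kernel_stack, irq_stack_ptr). A minimal sketch of the per-cpu API this conversion relies on; the variable name and uses below are illustrative, not taken from the patch:

	#include <linux/percpu.h>

	DEFINE_PER_CPU(unsigned int, demo_count);	/* hypothetical variable */

	static void demo(int cpu)
	{
		/* indexed access to any CPU's copy */
		per_cpu(demo_count, cpu)++;

		/* segment-relative access to the local CPU's copy,
		 * %gs-based on 64-bit (%fs-based on 32-bit) */
		percpu_write(demo_count, percpu_read(demo_count) + 1);
	}
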
@@ -132,7 +132,16 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
 	unsigned long can_disable;
-	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
+	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+	union _cpuid4_leaf_eax eax;
+	union _cpuid4_leaf_ebx ebx;
+	union _cpuid4_leaf_ecx ecx;
+	unsigned long size;
+	unsigned long can_disable;
 };
 
 #ifdef CONFIG_PCI
@@ -263,7 +272,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }
 
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	if (index < 3)
 		return;
@@ -271,7 +280,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }
 
 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+				   struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax 	eax;
 	union _cpuid4_leaf_ebx 	ebx;
@@ -299,6 +309,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 	return 0;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+	struct _cpuid4_info_regs *leaf_regs =
+		(struct _cpuid4_info_regs *)this_leaf;
+
+	return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
 	unsigned int		eax, ebx, ecx, edx;
@@ -338,11 +357,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		 * parameters cpuid leaf to find the cache details
 		 */
 		for (i = 0; i < num_cache_leaves; i++) {
-			struct _cpuid4_info this_leaf;
-
+			struct _cpuid4_info_regs this_leaf;
 			int retval;
 
-			retval = cpuid4_cache_lookup(i, &this_leaf);
+			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 			if (retval >= 0) {
 				switch(this_leaf.eax.split.level) {
 				    case 1:
@@ -491,17 +509,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
 	if (num_threads_sharing == 1)
-		cpu_set(cpu, this_leaf->shared_cpu_map);
+		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
 	else {
 		index_msb = get_count_order(num_threads_sharing);
 
 		for_each_online_cpu(i) {
 			if (cpu_data(i).apicid >> index_msb ==
 			    c->apicid >> index_msb) {
-				cpu_set(i, this_leaf->shared_cpu_map);
+				cpumask_set_cpu(i,
+					to_cpumask(this_leaf->shared_cpu_map));
 				if (i != cpu && per_cpu(cpuid4_info, i))  {
-					sibling_leaf = CPUID4_INFO_IDX(i, index);
-					cpu_set(cpu, sibling_leaf->shared_cpu_map);
+					sibling_leaf =
+						CPUID4_INFO_IDX(i, index);
+					cpumask_set_cpu(cpu, to_cpumask(
+						sibling_leaf->shared_cpu_map));
 				}
 			}
 		}
@@ -513,9 +534,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+		cpumask_clear_cpu(cpu,
+				  to_cpumask(sibling_leaf->shared_cpu_map));
 	}
 }
 #else
@@ -620,8 +642,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	int n = 0;
 
 	if (len > 1) {
-		cpumask_t *mask = &this_leaf->shared_cpu_map;
+		const struct cpumask *mask;
+
+		mask = to_cpumask(this_leaf->shared_cpu_map);
 		n = type?
 			cpulist_scnprintf(buf, len-2, mask) :
 			cpumask_scnprintf(buf, len-2, mask);
@@ -684,7 +707,8 @@ static struct pci_dev *get_k8_northbridge(int node)
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	ssize_t ret = 0;
 	int i;
@@ -718,7 +742,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
 		    size_t count)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	unsigned int ret, index, val;
 
@@ -863,7 +888,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 	return -ENOMEM;
 }
 
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -903,7 +928,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
-	cpu_set(cpu, cache_dev_map);
+	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
 	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return 0;
@@ -916,9 +941,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
-	if (!cpu_isset(cpu, cache_dev_map))
+	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
 		return;
-	cpu_clear(cpu, cache_dev_map);
+	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
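
The intel_cacheinfo.c hunks above shrink cpumask_t members into raw bitmaps: DECLARE_BITMAP(shared_cpu_map, NR_CPUS) is just an unsigned long array sized to NR_CPUS bits, and to_cpumask() reinterprets it as a struct cpumask * so the cpumask_* helpers still apply. A small sketch of the pattern (names are illustrative):

	#include <linux/cpumask.h>

	static DECLARE_BITMAP(demo_map, NR_CPUS);	/* hypothetical mask */

	static void demo_mark(unsigned int cpu)
	{
		cpumask_set_cpu(cpu, to_cpumask(demo_map));
		if (cpumask_test_cpu(cpu, to_cpumask(demo_map)))
			cpumask_clear_cpu(cpu, to_cpumask(demo_map));
	}
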
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
 struct threshold_bank {
 	struct kobject *kobj;
 	struct threshold_block *blocks;
-	cpumask_t cpus;
+	cpumask_var_t cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
 
@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = first_cpu(per_cpu(cpu_core_map, cpu));
+		i = cpumask_first(&per_cpu(cpu_core_map, cpu));
 
 		/* first core not up yet */
 		if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;
 
-		b->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 		per_cpu(threshold_banks, cpu)[bank] = b;
 		goto out;
 	}
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		err = -ENOMEM;
 		goto out;
 	}
+	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+		kfree(b);
+		err = -ENOMEM;
+		goto out;
+	}
 
 	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
 	if (!b->kobj)
 		goto out_free;
 
 #ifndef CONFIG_SMP
-	b->cpus = CPU_MASK_ALL;
+	cpumask_setall(b->cpus);
 #else
-	b->cpus = per_cpu(cpu_core_map, cpu);
+	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 #endif
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	if (err)
 		goto out_free;
 
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 out_free:
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
+	free_cpumask_var(b->cpus);
 	kfree(b);
 out:
 	return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
 	kobject_del(b->kobj);
 	kobject_put(b->kobj);
+	free_cpumask_var(b->cpus);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
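
In mce_amd_64.c, threshold_bank.cpus changes from an embedded cpumask_t to cpumask_var_t, which must be allocated before use and freed on every exit path, hence the new alloc_cpumask_var()/free_cpumask_var() calls above. A sketch of the lifecycle (with CONFIG_CPUMASK_OFFSTACK=y the mask is a real allocation; otherwise alloc/free degenerate to no-ops on an embedded array):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static int demo_bank_init(struct threshold_bank *b, unsigned int cpu)
	{
		if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL))
			return -ENOMEM;
		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
		return 0;
	}

	static void demo_bank_exit(struct threshold_bank *b)
	{
		free_cpumask_var(b->cpus);	/* must happen before kfree(b) */
	}
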
@@ -7,6 +7,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <asm/processor.h>
+#include <asm/apic.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>

@@ -24,7 +24,7 @@
 #include <asm/apic.h>
 #include <asm/hpet.h>
 #include <linux/kdebug.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
 

@@ -106,7 +106,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		const struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = get_cpu();
-	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
+	unsigned long *irq_stack_end =
+		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
 	unsigned used = 0;
 	struct thread_info *tinfo;
 	int graph = 0;
@@ -160,23 +161,23 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			stack = (unsigned long *) estack_end[-2];
 			continue;
 		}
-		if (irqstack_end) {
-			unsigned long *irqstack;
-			irqstack = irqstack_end -
-				(IRQSTACKSIZE - 64) / sizeof(*irqstack);
+		if (irq_stack_end) {
+			unsigned long *irq_stack;
+			irq_stack = irq_stack_end -
+				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
 
-			if (stack >= irqstack && stack < irqstack_end) {
+			if (stack >= irq_stack && stack < irq_stack_end) {
 				if (ops->stack(data, "IRQ") < 0)
 					break;
 				bp = print_context_stack(tinfo, stack, bp,
-					ops, data, irqstack_end, &graph);
+					ops, data, irq_stack_end, &graph);
 				/*
 				 * We link to the next stack (which would be
 				 * the process stack normally) the last
 				 * pointer (index -1 to end) in the IRQ stack:
 				 */
-				stack = (unsigned long *) (irqstack_end[-1]);
-				irqstack_end = NULL;
+				stack = (unsigned long *) (irq_stack_end[-1]);
+				irq_stack_end = NULL;
 				ops->stack(data, "EOI");
 				continue;
 			}
@@ -199,10 +200,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 	unsigned long *stack;
 	int i;
 	const int cpu = smp_processor_id();
-	unsigned long *irqstack_end =
-		(unsigned long *) (cpu_pda(cpu)->irqstackptr);
-	unsigned long *irqstack =
-		(unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
+	unsigned long *irq_stack_end =
+		(unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+	unsigned long *irq_stack =
+		(unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
 
 	/*
 	 * debugging aid: "show_stack(NULL, NULL);" prints the
@@ -218,9 +219,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
 	stack = sp;
 	for (i = 0; i < kstack_depth_to_print; i++) {
-		if (stack >= irqstack && stack <= irqstack_end) {
-			if (stack == irqstack_end) {
-				stack = (unsigned long *) (irqstack_end[-1]);
+		if (stack >= irq_stack && stack <= irq_stack_end) {
+			if (stack == irq_stack_end) {
+				stack = (unsigned long *) (irq_stack_end[-1]);
 				printk(" <EOI> ");
 			}
 		} else {
@@ -241,7 +242,7 @@ void show_registers(struct pt_regs *regs)
 	int i;
 	unsigned long sp;
 	const int cpu = smp_processor_id();
-	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
+	struct task_struct *cur = current;
 
 	sp = regs->sp;
 	printk("CPU %d ", cpu);
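
The dumpstack_64.c changes read the IRQ stack top from the new per-cpu irq_stack_ptr instead of pda->irqstackptr; the bounds arithmetic itself is unchanged. In outline (a sketch of the check used above):

	/* irq_stack_ptr points 64 bytes below the top of the per-cpu IRQ stack */
	unsigned long *irq_stack_end =
		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned long *irq_stack =
		irq_stack_end - (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);

	/* a stack pointer in [irq_stack, irq_stack_end) is on the IRQ stack;
	 * irq_stack_end[-1] then links back to the interrupted stack */
	if (stack >= irq_stack && stack < irq_stack_end)
		stack = (unsigned long *)irq_stack_end[-1];
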
@@ -366,10 +366,12 @@ void __init efi_init(void)
 					SMBIOS_TABLE_GUID)) {
 			efi.smbios = config_tables[i].table;
 			printk(" SMBIOS=0x%lx ", config_tables[i].table);
+#ifdef CONFIG_X86_UV
 		} else if (!efi_guidcmp(config_tables[i].guid,
 					UV_SYSTEM_TABLE_GUID)) {
 			efi.uv_systab = config_tables[i].table;
 			printk(" UVsystab=0x%lx ", config_tables[i].table);
+#endif
 		} else if (!efi_guidcmp(config_tables[i].guid,
 					HCDP_TABLE_GUID)) {
 			efi.hcdp = config_tables[i].table;

@@ -36,6 +36,7 @@
 #include <asm/proto.h>
 #include <asm/efi.h>
 #include <asm/cacheflush.h>
+#include <asm/fixmap.h>
 
 static pgd_t save_pgd __initdata;
 static unsigned long efi_flags __initdata;

@@ -672,7 +672,7 @@ common_interrupt:
 ENDPROC(common_interrupt)
 	CFI_ENDPROC
 
-#define BUILD_INTERRUPT(name, nr)	\
+#define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
 	RING0_INT_FRAME;		\
 	pushl $~(nr);			\
@@ -680,11 +680,13 @@ ENTRY(name)				\
 	SAVE_ALL;			\
 	TRACE_IRQS_OFF			\
 	movl %esp,%eax;			\
-	call smp_##name;		\
+	call fn;			\
 	jmp ret_from_intr;		\
 	CFI_ENDPROC;			\
ENDPROC(name)
 
+#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
+
 /* The include is where all of the SMP etc. interrupts come from */
 #include "entry_arch.h"
 
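
BUILD_INTERRUPT3 generalizes the entry_32.S stub generator so the handler name no longer has to be smp_<entry name>; plain BUILD_INTERRUPT becomes the fn = smp_##name special case. A hypothetical use, with illustrative vector and handler names:

	/* generates ENTRY(demo_interrupt): push ~DEMO_VECTOR, SAVE_ALL,
	 * then call smp_shared_demo_handler instead of smp_demo_interrupt */
	BUILD_INTERRUPT3(demo_interrupt, DEMO_VECTOR, smp_shared_demo_handler)
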
@@ -52,6 +52,7 @@
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
+#include <asm/percpu.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -209,7 +210,7 @@ ENTRY(native_usergs_sysret64)
 
 	/* %rsp:at FRAMEEND */
 	.macro FIXUP_TOP_OF_STACK tmp offset=0
-	movq %gs:pda_oldrsp,\tmp
+	movq PER_CPU_VAR(old_rsp),\tmp
 	movq \tmp,RSP+\offset(%rsp)
 	movq $__USER_DS,SS+\offset(%rsp)
 	movq $__USER_CS,CS+\offset(%rsp)
@@ -220,7 +221,7 @@ ENTRY(native_usergs_sysret64)
 
 	.macro RESTORE_TOP_OF_STACK tmp offset=0
 	movq RSP+\offset(%rsp),\tmp
-	movq \tmp,%gs:pda_oldrsp
+	movq \tmp,PER_CPU_VAR(old_rsp)
 	movq EFLAGS+\offset(%rsp),\tmp
 	movq \tmp,R11+\offset(%rsp)
 	.endm
@@ -336,15 +337,15 @@ ENTRY(save_args)
 	je 1f
 	SWAPGS
 	/*
-	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * irq_count is used to check if a CPU is already on an interrupt stack
 	 * or not. While this is essentially redundant with preempt_count it is
 	 * a little cheaper to use a separate counter in the PDA (short of
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
-1:	incl %gs:pda_irqcount
+1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
 	popq_cfi %rax			/* move return address... */
-	mov %gs:pda_irqstackptr,%rsp
+	mov PER_CPU_VAR(irq_stack_ptr),%rsp
 	EMPTY_FRAME 0
 	pushq_cfi %rax			/* ... to the new stack */
 	/*
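
PER_CPU_VAR() hides the segment override that used to be spelled out as %gs:pda_xxx. Its definition in asm/percpu.h is approximately the following (quoted from memory, so treat it as an assumption rather than the authoritative text):

	#ifdef CONFIG_SMP
	/* __percpu_seg is gs on 64-bit and fs on 32-bit */
	#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
	#else
	#define PER_CPU_VAR(var)	per_cpu__##var
	#endif
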
@@ -467,7 +468,7 @@ END(ret_from_fork)
 ENTRY(system_call)
 	CFI_STARTPROC	simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
+	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
 	CFI_REGISTER	rip,rcx
 	/*CFI_REGISTER	rflags,r11*/
 	SWAPGS_UNSAFE_STACK
@@ -478,8 +479,8 @@ ENTRY(system_call)
 	 */
 ENTRY(system_call_after_swapgs)
 
-	movq	%rsp,%gs:pda_oldrsp
-	movq	%gs:pda_kernelstack,%rsp
+	movq	%rsp,PER_CPU_VAR(old_rsp)
+	movq	PER_CPU_VAR(kernel_stack),%rsp
 	/*
 	 * No need to follow this irqs off/on section - it's straight
 	 * and short:
@@ -522,7 +523,7 @@ sysret_check:
 	CFI_REGISTER	rip,rcx
 	RESTORE_ARGS 0,-ARG_SKIP,1
 	/*CFI_REGISTER	rflags,r11*/
-	movq	%gs:pda_oldrsp, %rsp
+	movq	PER_CPU_VAR(old_rsp), %rsp
 	USERGS_SYSRET64
 
 	CFI_RESTORE_STATE
@@ -832,11 +833,11 @@ common_interrupt:
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
-	/* 0(%rsp): oldrsp-ARGOFFSET */
+	/* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
@@ -981,8 +982,10 @@ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
 	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
 #endif
 
+#ifdef CONFIG_X86_UV
 apicinterrupt UV_BAU_MESSAGE \
 	uv_bau_message_intr1 uv_bau_message_interrupt
+#endif
 apicinterrupt LOCAL_TIMER_VECTOR \
 	apic_timer_interrupt smp_apic_timer_interrupt
 
@@ -1072,10 +1075,10 @@ ENTRY(\sym)
 	TRACE_IRQS_OFF
 	movq %rsp,%rdi		/* pt_regs pointer */
 	xorl %esi,%esi		/* no error code */
-	movq %gs:pda_data_offset, %rbp
-	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	PER_CPU(init_tss, %rbp)
+	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	call \do_sym
-	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	jmp paranoid_exit	/* %ebx: no swapgs flag */
 	CFI_ENDPROC
END(\sym)
@@ -1259,14 +1262,14 @@ ENTRY(call_softirq)
 	CFI_REL_OFFSET rbp,0
 	mov  %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	incl %gs:pda_irqcount
-	cmove %gs:pda_irqstackptr,%rsp
+	incl PER_CPU_VAR(irq_count)
+	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 	push  %rbp			# backlink for old unwinder
 	call __do_softirq
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET   -8
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	ret
 	CFI_ENDPROC
END(call_softirq)
@@ -1296,15 +1299,15 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
 	CFI_ENDPROC
 	DEFAULT_FRAME
-11:	incl %gs:pda_irqcount
+11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	cmovzq %gs:pda_irqstackptr,%rsp
+	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	pushq %rbp			# backlink for old unwinder
 	call xen_evtchn_do_upcall
 	popq %rsp
 	CFI_DEF_CFA_REGISTER rsp
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	jmp  error_exit
 	CFI_ENDPROC
END(do_hypervisor_callback)
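
The ist stub above also uses the two-argument PER_CPU(var, reg) form: unlike PER_CPU_VAR(), it materializes the address of this CPU's copy in a register, which is what lets the TSS_ist field be adjusted at an offset from %rbp. On SMP it expands to roughly the following (an approximation from memory, not the literal macro):

	movq %gs:per_cpu__this_cpu_off, %rbp	/* this CPU's percpu offset */
	lea per_cpu__init_tss(%rbp), %rbp	/* %rbp = &this CPU's init_tss */
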
@@ -32,7 +32,9 @@ extern struct genapic apic_x2apic_cluster;
 struct genapic __read_mostly *genapic = &apic_flat;
 
 static struct genapic *apic_probe[] __initdata = {
+#ifdef CONFIG_X86_UV
 	&apic_x2apic_uv_x,
+#endif
 	&apic_x2apic_phys,
 	&apic_x2apic_cluster,
 	&apic_physflat,

@@ -25,6 +25,7 @@
 #include <asm/ipi.h>
 #include <asm/genapic.h>
 #include <asm/pgtable.h>
+#include <asm/uv/uv.h>
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/bios.h>

@@ -26,27 +26,6 @@
 #include <asm/bios_ebda.h>
 #include <asm/trampoline.h>
 
-/* boot cpu pda */
-static struct x8664_pda _boot_cpu_pda;
-
-#ifdef CONFIG_SMP
-/*
- * We install an empty cpu_pda pointer table to indicate to early users
- * (numa_set_node) that the cpu_pda pointer table for cpus other than
- * the boot cpu is not yet setup.
- */
-static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
-#else
-static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
-#endif
-
-void __init x86_64_init_pda(void)
-{
-	_cpu_pda = __cpu_pda;
-	cpu_pda(0) = &_boot_cpu_pda;
-	pda_init(0);
-}
-
 static void __init zap_identity_mappings(void)
 {
 	pgd_t *pgd = pgd_offset_k(0UL);
@@ -112,8 +91,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	if (console_loglevel == 10)
 		early_printk("Kernel alive\n");
 
-	x86_64_init_pda();
-
 	x86_64_start_reservations(real_mode_data);
 }
 

@@ -429,12 +429,14 @@ is386:	movl $2,%ecx		# set MP
 	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
 	movl %eax,%ss			# after changing gdt.
-	movl %eax,%fs			# gets reset once there's real percpu
 
 	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
 	movl %eax,%ds
 	movl %eax,%es
 
+	movl $(__KERNEL_PERCPU), %eax
+	movl %eax,%fs			# set this cpu's percpu
+
 	xorl %eax,%eax			# Clear GS and LDT
 	movl %eax,%gs
 	lldt %ax
@@ -446,8 +448,6 @@ is386:	movl $2,%ecx		# set MP
 	movb $1, ready
 	cmpb $0,%cl		# the first CPU calls start_kernel
 	je   1f
-	movl $(__KERNEL_PERCPU), %eax
-	movl %eax,%fs		# set this cpu's percpu
 	movl (stack_start), %esp
1:
 #endif /* CONFIG_SMP */
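
With these head_32.S changes the boot CPU loads %fs with __KERNEL_PERCPU up front, instead of parking %fs on __KERNEL_DS until the real per-cpu areas exist (the secondary-CPU-only load is removed in the second hunk). Each CPU's GDT carries a __KERNEL_PERCPU descriptor based at that CPU's per-cpu area, so a hypothetical per-cpu variable becomes reachable as:

	movl $(__KERNEL_PERCPU), %eax
	movl %eax, %fs				# %fs now bases the local per-cpu area
	incl PER_CPU_VAR(demo_count)		# i.e. %fs:per_cpu__demo_count on SMP
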
@@ -19,6 +19,7 @@
 #include <asm/msr.h>
 #include <asm/cache.h>
 #include <asm/processor-flags.h>
+#include <asm/percpu.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -204,6 +205,19 @@ ENTRY(secondary_startup_64)
 	pushq $0
 	popfq
 
+#ifdef CONFIG_SMP
+	/*
+	 * Fix up static pointers that need __per_cpu_load added.  The assembler
+	 * is unable to do this directly.  This is only needed for the boot cpu.
+	 * These values are set up with the correct base addresses by C code for
+	 * secondary cpus.
+	 */
+	movq	initial_gs(%rip), %rax
+	cmpl	$0, per_cpu__cpu_number(%rax)
+	jne	1f
+	addq	%rax, early_gdt_descr_base(%rip)
+1:
+#endif
 	/*
 	 * We must switch to a new descriptor in kernel space for the GDT
 	 * because soon the kernel won't have access anymore to the userspace
@@ -226,12 +240,15 @@ ENTRY(secondary_startup_64)
 	movl %eax,%fs
 	movl %eax,%gs
 
-	/* 
-	 * Setup up a dummy PDA. this is just for some early bootup code
-	 * that does in_interrupt() 
+	/* Set up %gs.
+	 *
+	 * The base of %gs always points to the bottom of the irqstack
+	 * union.  If the stack protector canary is enabled, it is
+	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
+	 * init data section till per cpu areas are set up.
 	 */
 	movl	$MSR_GS_BASE,%ecx
-	movq	$empty_zero_page,%rax
+	movq	initial_gs(%rip),%rax
 	movq    %rax,%rdx
 	shrq	$32,%rdx
 	wrmsr
@@ -257,6 +274,12 @@ ENTRY(secondary_startup_64)
 	.align	8
 	ENTRY(initial_code)
 	.quad	x86_64_start_kernel
+	ENTRY(initial_gs)
+#ifdef CONFIG_SMP
+	.quad	__per_cpu_load
+#else
+	.quad	PER_CPU_VAR(irq_stack_union)
+#endif
 	__FINITDATA
 
 	ENTRY(stack_start)
@@ -401,6 +424,7 @@ NEXT_PAGE(level2_spare_pgt)
 	.globl early_gdt_descr
early_gdt_descr:
 	.word	GDT_ENTRIES*8-1
+early_gdt_descr_base:
 	.quad	per_cpu__gdt_page
 
ENTRY(phys_base)
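
initial_gs points the %gs base at the bottom of this CPU's irq_stack_union, as the rewritten comment in head_64.S explains. The union introduced by this series looks approximately like this (reproduced from memory of asm/processor.h; treat the exact layout as an assumption):

	union irq_stack_union {
		char irq_stack[IRQ_STACK_SIZE];
		/*
		 * %gs points at the bottom of the union; with stack
		 * protector enabled the canary then sits at the fixed
		 * offset %gs:40.
		 */
		struct {
			char gs_base[40];
			unsigned long stack_canary;
		};
	};
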
@@ -46,6 +46,7 @@
 #include <asm/idle.h>
 #include <asm/io.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/desc.h>
 #include <asm/proto.h>
 #include <asm/acpi.h>
@@ -82,11 +83,11 @@ static DEFINE_SPINLOCK(vector_lock);
 int nr_ioapic_registers[MAX_IO_APICS];
 
 /* I/O APIC entries */
-struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
 int nr_ioapics;
 
 /* MP IRQ source entries */
-struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 
 /* # of MP IRQ source entries */
 int mp_irq_entries;
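
The rename from struct mp_config_intsrc to struct mpc_intsrc also drops the mp_ prefix from the members (mp_irqtype becomes irqtype, mp_dstapic becomes dstapic, and so on), which is what the accessor changes in the following hunks track. For orientation, the MP-table interrupt source entry carries roughly these fields (sketched from the field names used below; see mpspec_def.h for the authoritative definition):

	struct mpc_intsrc {
		unsigned char type;
		unsigned char irqtype;		/* was mp_irqtype */
		unsigned short irqflag;
		unsigned char srcbus;		/* was mp_srcbus */
		unsigned char srcbusirq;	/* was mp_srcbusirq */
		unsigned char dstapic;		/* was mp_dstapic */
		unsigned char dstirq;		/* was mp_dstirq */
	};
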
| 
						 | 
					@ -356,7 +357,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (!cfg->move_in_progress) {
 | 
						if (!cfg->move_in_progress) {
 | 
				
			||||||
		/* it means that domain is not changed */
 | 
							/* it means that domain is not changed */
 | 
				
			||||||
		if (!cpumask_intersects(&desc->affinity, mask))
 | 
							if (!cpumask_intersects(desc->affinity, mask))
 | 
				
			||||||
			cfg->move_desc_pending = 1;
 | 
								cfg->move_desc_pending = 1;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
					@ -386,7 +387,7 @@ struct io_apic {
 | 
				
			||||||
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
 | 
					static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
 | 
						return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
 | 
				
			||||||
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
 | 
							+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
 | 
					static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
 | 
				
			||||||
@@ -579,9 +580,9 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 	if (assign_irq_vector(irq, cfg, mask))
 		return BAD_APICID;
 
-	cpumask_and(&desc->affinity, cfg->domain, mask);
+	cpumask_and(desc->affinity, cfg->domain, mask);
 	set_extra_move_desc(desc, mask);
-	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+	return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
 }
 
 static void
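set_desc_affinity() above, like the affinity and pending_mask hunks further down, follows one mechanical rule: the masks in irq_desc are no longer embedded cpumask_t values taken by address but pointers handed straight to the cpumask_*() helpers, so that with CONFIG_CPUMASK_OFFSTACK the storage can live outside the descriptor. A minimal standalone sketch of the two shapes, using simplified stand-ins for the kernel types (assumption: the real cpumask is an NR_CPUS-sized bitmap from <linux/cpumask.h>):

	#include <stdio.h>
	#include <string.h>

	typedef struct { unsigned long bits; } cpumask_t;	/* toy stand-in */

	struct irq_desc_old { cpumask_t affinity; };	/* embedded: callers write &desc->affinity */
	struct irq_desc_new { cpumask_t *affinity; };	/* pointer:  callers write  desc->affinity */

	static void cpumask_copy(cpumask_t *dst, const cpumask_t *src)
	{
		memcpy(dst, src, sizeof(*dst));	/* toy copy; the kernel copies a bitmap */
	}

	int main(void)
	{
		cpumask_t mask = { .bits = 0x5 }, storage = { 0 };
		struct irq_desc_old old_desc = { { 0 } };
		struct irq_desc_new new_desc = { .affinity = &storage };

		cpumask_copy(&old_desc.affinity, &mask);	/* old spelling */
		cpumask_copy(new_desc.affinity, &mask);		/* new spelling */
		printf("%lx %lx\n", old_desc.affinity.bits, new_desc.affinity->bits);
		return 0;
	}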
@@ -944,10 +945,10 @@ static int find_irq_entry(int apic, int pin, int type)
 	int i;
 
 	for (i = 0; i < mp_irq_entries; i++)
-		if (mp_irqs[i].mp_irqtype == type &&
-		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
-		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
-		    mp_irqs[i].mp_dstirq == pin)
+		if (mp_irqs[i].irqtype == type &&
+		    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
+		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
+		    mp_irqs[i].dstirq == pin)
 			return i;
 
 	return -1;
@@ -961,13 +962,13 @@ static int __init find_isa_irq_pin(int irq, int type)
 	int i;
 
 	for (i = 0; i < mp_irq_entries; i++) {
-		int lbus = mp_irqs[i].mp_srcbus;
+		int lbus = mp_irqs[i].srcbus;
 
 		if (test_bit(lbus, mp_bus_not_pci) &&
-		    (mp_irqs[i].mp_irqtype == type) &&
-		    (mp_irqs[i].mp_srcbusirq == irq))
+		    (mp_irqs[i].irqtype == type) &&
+		    (mp_irqs[i].srcbusirq == irq))
 
-			return mp_irqs[i].mp_dstirq;
+			return mp_irqs[i].dstirq;
 	}
 	return -1;
 }
@@ -977,17 +978,17 @@ static int __init find_isa_irq_apic(int irq, int type)
 	int i;
 
 	for (i = 0; i < mp_irq_entries; i++) {
-		int lbus = mp_irqs[i].mp_srcbus;
+		int lbus = mp_irqs[i].srcbus;
 
 		if (test_bit(lbus, mp_bus_not_pci) &&
-		    (mp_irqs[i].mp_irqtype == type) &&
-		    (mp_irqs[i].mp_srcbusirq == irq))
+		    (mp_irqs[i].irqtype == type) &&
+		    (mp_irqs[i].srcbusirq == irq))
 			break;
 	}
 	if (i < mp_irq_entries) {
 		int apic;
 		for(apic = 0; apic < nr_ioapics; apic++) {
-			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
+			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
 				return apic;
 		}
 	}
@@ -1012,23 +1013,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 		return -1;
 	}
 	for (i = 0; i < mp_irq_entries; i++) {
-		int lbus = mp_irqs[i].mp_srcbus;
+		int lbus = mp_irqs[i].srcbus;
 
 		for (apic = 0; apic < nr_ioapics; apic++)
-			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
-			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
+			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
+			    mp_irqs[i].dstapic == MP_APIC_ALL)
 				break;
 
 		if (!test_bit(lbus, mp_bus_not_pci) &&
-		    !mp_irqs[i].mp_irqtype &&
+		    !mp_irqs[i].irqtype &&
 		    (bus == lbus) &&
-		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
-			int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
+		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
+			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
 
 			if (!(apic || IO_APIC_IRQ(irq)))
 				continue;
 
-			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
+			if (pin == (mp_irqs[i].srcbusirq & 3))
 				return irq;
 			/*
 			 * Use the first all-but-pin matching entry as a
@@ -1071,7 +1072,7 @@ static int EISA_ELCR(unsigned int irq)
  * EISA conforming in the MP table, that means its trigger type must
  * be read in from the ELCR */
 
-#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
+#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
 #define default_EISA_polarity(idx)	default_ISA_polarity(idx)
 
 /* PCI interrupts are always polarity one level triggered,
@@ -1088,13 +1089,13 @@ static int EISA_ELCR(unsigned int irq)
 
 static int MPBIOS_polarity(int idx)
 {
-	int bus = mp_irqs[idx].mp_srcbus;
+	int bus = mp_irqs[idx].srcbus;
 	int polarity;
 
 	/*
 	 * Determine IRQ line polarity (high active or low active):
 	 */
-	switch (mp_irqs[idx].mp_irqflag & 3)
+	switch (mp_irqs[idx].irqflag & 3)
 	{
 		case 0: /* conforms, ie. bus-type dependent polarity */
 			if (test_bit(bus, mp_bus_not_pci))
@@ -1130,13 +1131,13 @@ static int MPBIOS_polarity(int idx)
 
 static int MPBIOS_trigger(int idx)
 {
-	int bus = mp_irqs[idx].mp_srcbus;
+	int bus = mp_irqs[idx].srcbus;
 	int trigger;
 
 	/*
 	 * Determine IRQ trigger mode (edge or level sensitive):
 	 */
-	switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
+	switch ((mp_irqs[idx].irqflag>>2) & 3)
 	{
 		case 0: /* conforms, ie. bus-type dependent */
 			if (test_bit(bus, mp_bus_not_pci))
@@ -1214,16 +1215,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq);
 static int pin_2_irq(int idx, int apic, int pin)
 {
 	int irq, i;
-	int bus = mp_irqs[idx].mp_srcbus;
+	int bus = mp_irqs[idx].srcbus;
 
 	/*
 	 * Debugging check, we are in big trouble if this message pops up!
 	 */
-	if (mp_irqs[idx].mp_dstirq != pin)
+	if (mp_irqs[idx].dstirq != pin)
 		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
 
 	if (test_bit(bus, mp_bus_not_pci)) {
-		irq = mp_irqs[idx].mp_srcbusirq;
+		irq = mp_irqs[idx].srcbusirq;
 	} else {
 		/*
 		 * PCI IRQs are mapped in order
@@ -1566,14 +1567,14 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
 		    "IRQ %d Mode:%i Active:%i)\n",
-		    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
+		    apic, mp_ioapics[apic].apicid, pin, cfg->vector,
 		    irq, trigger, polarity);
 
 
-	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
+	if (setup_ioapic_entry(mp_ioapics[apic].apicid, irq, &entry,
 			       dest, trigger, polarity, cfg->vector)) {
 		printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
-		       mp_ioapics[apic].mp_apicid, pin);
+		       mp_ioapics[apic].apicid, pin);
 		__clear_irq_vector(irq, cfg);
 		return;
 	}
@@ -1604,12 +1605,10 @@ static void __init setup_IO_APIC_irqs(void)
 				notcon = 1;
 				apic_printk(APIC_VERBOSE,
 					KERN_DEBUG " %d-%d",
-					mp_ioapics[apic].mp_apicid,
-					pin);
+					mp_ioapics[apic].apicid, pin);
 			} else
 				apic_printk(APIC_VERBOSE, " %d-%d",
-					mp_ioapics[apic].mp_apicid,
-					pin);
+					mp_ioapics[apic].apicid, pin);
 				continue;
 			}
 			if (notcon) {
@@ -1699,7 +1698,7 @@ __apicdebuginit(void) print_IO_APIC(void)
 	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
 	for (i = 0; i < nr_ioapics; i++)
 		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-		       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
+		       mp_ioapics[i].apicid, nr_ioapic_registers[i]);
 
 	/*
 	 * We are a bit conservative about what we expect.  We have to
@@ -1719,7 +1718,7 @@ __apicdebuginit(void) print_IO_APIC(void)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	printk("\n");
-	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
+	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
 	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
 	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
 	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
@@ -2121,14 +2120,14 @@ static void __init setup_ioapic_ids_from_mpc(void)
 		reg_00.raw = io_apic_read(apic, 0);
 		spin_unlock_irqrestore(&ioapic_lock, flags);
 
-		old_id = mp_ioapics[apic].mp_apicid;
+		old_id = mp_ioapics[apic].apicid;
 
-		if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
+		if (mp_ioapics[apic].apicid >= get_physical_broadcast()) {
 			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
-				apic, mp_ioapics[apic].mp_apicid);
+				apic, mp_ioapics[apic].apicid);
 			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
 				reg_00.bits.ID);
-			mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
+			mp_ioapics[apic].apicid = reg_00.bits.ID;
 		}
 
 		/*
@@ -2137,9 +2136,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
 		 * 'stuck on smp_invalidate_needed IPI wait' messages.
 		 */
 		if (check_apicid_used(phys_id_present_map,
-					mp_ioapics[apic].mp_apicid)) {
+					mp_ioapics[apic].apicid)) {
 			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
-				apic, mp_ioapics[apic].mp_apicid);
+				apic, mp_ioapics[apic].apicid);
 			for (i = 0; i < get_physical_broadcast(); i++)
 				if (!physid_isset(i, phys_id_present_map))
 					break;
@@ -2148,13 +2147,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
 			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
 				i);
 			physid_set(i, phys_id_present_map);
-			mp_ioapics[apic].mp_apicid = i;
+			mp_ioapics[apic].apicid = i;
 		} else {
 			physid_mask_t tmp;
-			tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
+			tmp = apicid_to_cpu_present(mp_ioapics[apic].apicid);
 			apic_printk(APIC_VERBOSE, "Setting %d in the "
 					"phys_id_present_map\n",
-					mp_ioapics[apic].mp_apicid);
+					mp_ioapics[apic].apicid);
 			physids_or(phys_id_present_map, phys_id_present_map, tmp);
 		}
 
@@ -2163,11 +2162,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
 		 * We need to adjust the IRQ routing table
 		 * if the ID changed.
 		 */
-		if (old_id != mp_ioapics[apic].mp_apicid)
+		if (old_id != mp_ioapics[apic].apicid)
 			for (i = 0; i < mp_irq_entries; i++)
-				if (mp_irqs[i].mp_dstapic == old_id)
-					mp_irqs[i].mp_dstapic
-						= mp_ioapics[apic].mp_apicid;
+				if (mp_irqs[i].dstapic == old_id)
+					mp_irqs[i].dstapic
+						= mp_ioapics[apic].apicid;
 
 		/*
 		 * Read the right value from the MPC table and
@@ -2175,9 +2174,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
 		 */
 		apic_printk(APIC_VERBOSE, KERN_INFO
 			"...changing IO-APIC physical APIC ID to %d ...",
-			mp_ioapics[apic].mp_apicid);
+			mp_ioapics[apic].apicid);
 
-		reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
+		reg_00.bits.ID = mp_ioapics[apic].apicid;
 		spin_lock_irqsave(&ioapic_lock, flags);
 		io_apic_write(apic, 0, reg_00.raw);
 		spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -2188,7 +2187,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
 		spin_lock_irqsave(&ioapic_lock, flags);
 		reg_00.raw = io_apic_read(apic, 0);
 		spin_unlock_irqrestore(&ioapic_lock, flags);
-		if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
+		if (reg_00.bits.ID != mp_ioapics[apic].apicid)
 			printk("could not set ID!\n");
 		else
 			apic_printk(APIC_VERBOSE, " ok.\n");
@@ -2383,7 +2382,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 	if (cfg->move_in_progress)
 		send_cleanup_vector(cfg);
 
-	cpumask_copy(&desc->affinity, mask);
+	cpumask_copy(desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2404,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
 	}
 
 	/* everthing is clear. we have right of way */
-	migrate_ioapic_irq_desc(desc, &desc->pending_mask);
+	migrate_ioapic_irq_desc(desc, desc->pending_mask);
 
 	ret = 0;
 	desc->status &= ~IRQ_MOVE_PENDING;
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 
 unmask:
 	unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2433,7 @@ static void ir_irq_migration(struct work_struct *work)
 				continue;
 			}
 
-			desc->chip->set_affinity(irq, &desc->pending_mask);
+			desc->chip->set_affinity(irq, desc->pending_mask);
 			spin_unlock_irqrestore(&desc->lock, flags);
 		}
 	}
@@ -2448,7 +2447,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
 {
 	if (desc->status & IRQ_LEVEL) {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, mask);
+		cpumask_copy(desc->pending_mask, mask);
 		migrate_irq_remapped_level_desc(desc);
 		return;
 	}
@@ -2516,7 +2515,7 @@ static void irq_complete_move(struct irq_desc **descp)
 
 		/* domain has not changed, but affinity did */
 		me = smp_processor_id();
-		if (cpu_isset(me, desc->affinity)) {
+		if (cpumask_test_cpu(me, desc->affinity)) {
 			*descp = desc = move_irq_desc(desc, me);
 			/* get the new one */
 			cfg = desc->chip_data;
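The cpu_isset() to cpumask_test_cpu() switch in irq_complete_move() is the same conversion from another angle: cpu_isset() wants a cpumask_t object, while cpumask_test_cpu() takes the struct cpumask pointer that desc->affinity now is.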
@@ -3117,8 +3116,8 @@ static int ioapic_resume(struct sys_device *dev)
 
 	spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(dev->id, 0);
-	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
-		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
+	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
+		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
 		io_apic_write(dev->id, 0, reg_00.raw);
 	}
 	spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -3183,7 +3182,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
 
 	irq = 0;
 	spin_lock_irqsave(&vector_lock, flags);
-	for (new = irq_want; new < NR_IRQS; new++) {
+	for (new = irq_want; new < nr_irqs; new++) {
 		if (platform_legacy_irq(new))
 			continue;
 
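In create_irq_nr() the compile-time NR_IRQS bound becomes the runtime nr_irqs, which under CONFIG_SPARSE_IRQ is sized to the machine (see arch_probe_nr_irqs() further down) instead of to the worst-case constant.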
@@ -3258,6 +3257,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 	int err;
 	unsigned dest;
 
+	if (disable_apic)
+		return -ENXIO;
+
 	cfg = irq_cfg(irq);
 	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
 	if (err)
@@ -3726,6 +3728,9 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 	struct irq_cfg *cfg;
 	int err;
 
+	if (disable_apic)
+		return -ENXIO;
+
 	cfg = irq_cfg(irq);
 	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
 	if (!err) {
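msi_compose_msg() and arch_setup_ht_irq() gain the same early guard: with the APIC disabled there is no way to deliver an MSI or HT interrupt, so the setup now fails with -ENXIO before a vector is ever assigned.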
@@ -3760,7 +3765,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 }
 #endif /* CONFIG_HT_IRQ */
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_UV
 /*
  * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
@@ -3850,6 +3855,22 @@ void __init probe_nr_irqs_gsi(void)
 		nr_irqs_gsi = nr;
 }
 
+#ifdef CONFIG_SPARSE_IRQ
+int __init arch_probe_nr_irqs(void)
+{
+	int nr;
+
+	nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ?
+		(NR_VECTORS + (8 * nr_cpu_ids)) :
+		(NR_VECTORS + (32 * nr_ioapics)));
+
+	if (nr < nr_irqs && nr > nr_irqs_gsi)
+		nr_irqs = nr;
+
+	return 0;
+}
+#endif
+
 /* --------------------------------------------------------------------------
                           ACPI-based IOAPIC Configuration
    -------------------------------------------------------------------------- */
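arch_probe_nr_irqs() sizes the IRQ space from whichever is larger, eight vectors per possible CPU or thirty-two pins per I/O APIC, on top of the NR_VECTORS base, and only replaces nr_irqs when the result is below the configured ceiling and still covers the probed GSI range. A standalone sketch of the sizing rule; NR_VECTORS is 256 on x86, and the sample CPU and IOAPIC counts below are made-up inputs:

	#include <stdio.h>

	#define NR_VECTORS 256	/* x86 interrupt-vector space */

	/* Mirror of the expression added above, for experimentation. */
	static int probe_nr(int nr_cpu_ids, int nr_ioapics)
	{
		return (8 * nr_cpu_ids) > (32 * nr_ioapics) ?
			NR_VECTORS + 8 * nr_cpu_ids :
			NR_VECTORS + 32 * nr_ioapics;
	}

	int main(void)
	{
		printf("%d\n", probe_nr(16, 2));	/* 8*16 = 128 > 32*2 = 64, so 256 + 128 = 384 */
		printf("%d\n", probe_nr(4, 8));		/* 32*8 = 256 > 8*4 = 32,  so 256 + 256 = 512 */
		return 0;
	}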
@@ -3984,8 +4005,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 		return -1;
 
 	for (i = 0; i < mp_irq_entries; i++)
-		if (mp_irqs[i].mp_irqtype == mp_INT &&
-		    mp_irqs[i].mp_srcbusirq == bus_irq)
+		if (mp_irqs[i].irqtype == mp_INT &&
+		    mp_irqs[i].srcbusirq == bus_irq)
 			break;
 	if (i >= mp_irq_entries)
 		return -1;
@@ -4039,7 +4060,7 @@ void __init setup_ioapic_dest(void)
 			 */
 			if (desc->status &
 			    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-				mask = &desc->affinity;
+				mask = desc->affinity;
 			else
 				mask = TARGET_CPUS;
 
@@ -4100,7 +4121,7 @@ void __init ioapic_init_mappings(void)
 	ioapic_res = ioapic_setup_resources();
 	for (i = 0; i < nr_ioapics; i++) {
 		if (smp_found_config) {
-			ioapic_phys = mp_ioapics[i].mp_apicaddr;
+			ioapic_phys = mp_ioapics[i].apicaddr;
 #ifdef CONFIG_X86_32
 			if (!ioapic_phys) {
 				printk(KERN_ERR
@@ -36,11 +36,7 @@ void ack_bad_irq(unsigned int irq)
 #endif
 }
 
-#ifdef CONFIG_X86_32
 #define irq_stats(x)		(&per_cpu(irq_stat, x))
-#else
-# define irq_stats(x)		cpu_pda(x)
-#endif
 /*
  * /proc/interrupts printing:
  */
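With the #ifdef gone, the 32-bit and 64-bit builds share one definition of irq_stats(): the PDA-based 64-bit variant is obsolete because irq_cpustat_t now lives in an ordinary per-CPU variable on both, as the next file's hunk shows.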
@@ -248,7 +248,7 @@ void fixup_irqs(void)
 		if (irq == 2)
 			continue;
 
-		affinity = &desc->affinity;
+		affinity = desc->affinity;
 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			affinity = cpu_all_mask;
@@ -18,6 +18,13 @@
 #include <linux/smp.h>
 #include <asm/io_apic.h>
 #include <asm/idle.h>
+#include <asm/apic.h>
+
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+DEFINE_PER_CPU(struct pt_regs *, irq_regs);
+EXPORT_PER_CPU_SYMBOL(irq_regs);
 
 /*
  * Probabilistic stack overflow check:
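The added DEFINE_PER_CPU lines give the 64-bit build the same per-CPU irq_stat and irq_regs objects the 32-bit build already had, replacing fields that used to sit in the per-processor PDA. A toy model of the per-CPU idiom outside the kernel, with a plain array standing in for the real per-CPU sections (the kernel macros additionally handle section placement and cache-line alignment):

	#include <stdio.h>

	#define MAX_CPUS 4	/* toy bound; the kernel sizes this at boot */

	typedef struct { unsigned int __nmi_count, apic_timer_irqs; } irq_cpustat_t;

	static irq_cpustat_t irq_stat[MAX_CPUS];	/* models DEFINE_PER_CPU(irq_cpustat_t, irq_stat) */

	#define per_cpu(var, cpu) ((var)[cpu])		/* models the kernel's per_cpu() accessor */

	int main(void)
	{
		per_cpu(irq_stat, 2).__nmi_count = 7;
		printf("%u\n", per_cpu(irq_stat, 2).__nmi_count);
		return 0;
	}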
@@ -100,7 +107,7 @@ void fixup_irqs(void)
 		/* interrupt's are disabled at this point */
 		spin_lock(&desc->lock);
 
-		affinity = &desc->affinity;
+		affinity = desc->affinity;
 		if (!irq_has_action(irq) ||
 		    cpumask_equal(affinity, cpu_online_mask)) {
 			spin_unlock(&desc->lock);
@@ -149,8 +149,15 @@ void __init native_init_IRQ(void)
 	 */
 	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
-	/* IPI for invalidation */
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+	/* IPIs for invalidation */
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
+	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
 
 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
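The single invalidation IPI becomes eight, INVALIDATE_TLB_VECTOR_START+0 through +7, each with its own handler, so several CPUs can issue TLB-shootdown requests concurrently instead of serializing behind one vector; this appears to bring the 32-bit init path in line with the multi-vector scheme the 64-bit side already used.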
@@ -87,9 +87,9 @@
 #include <linux/cpu.h>
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
+#include <linux/uaccess.h>
 
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/microcode.h>
 
@@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
 		return ret;
 	}
 
-	ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
-			&get_ucode_fw);
+	ret = generic_load_microcode(cpu, (void *)firmware->data,
+				     firmware->size, &get_ucode_fw);
 
 	release_firmware(firmware);
 
@@ -122,7 +122,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 #endif
 			break;
 		default:
-			printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n",
+			printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
 			       me->name, ELF64_R_TYPE(rel[i].r_info));
 			return -ENOEXEC;
 		}
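The format-string fix is a correctness point: ELF64_R_TYPE() yields a 64-bit value, and the C99 length modifier for unsigned long long is ll, so %llu is the right spelling where the nonstandard %Lu only worked by accident.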
@@ -143,7 +143,7 @@ int apply_relocate(Elf_Shdr *sechdrs,
 		   unsigned int relsec,
 		   struct module *me)
 {
-	printk("non add relocation not supported\n");
+	printk(KERN_ERR "non add relocation not supported\n");
 	return -ENOSYS;
 }
 
@@ -144,11 +144,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
 	if (bad_ioapic(m->apicaddr))
 		return;
 
-	mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
-	mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
-	mp_ioapics[nr_ioapics].mp_type = m->type;
-	mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
-	mp_ioapics[nr_ioapics].mp_flags = m->flags;
+	mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
+	mp_ioapics[nr_ioapics].apicid = m->apicid;
+	mp_ioapics[nr_ioapics].type = m->type;
+	mp_ioapics[nr_ioapics].apicver = m->apicver;
+	mp_ioapics[nr_ioapics].flags = m->flags;
 	nr_ioapics++;
 }
 
@@ -160,55 +160,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
 		m->srcbusirq, m->dstapic, m->dstirq);
 }
 
-static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
+static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
 {
 	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
 		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-		mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
-		(mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
-		mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
+		mp_irq->irqtype, mp_irq->irqflag & 3,
+		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
+		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
 }
 
 static void __init assign_to_mp_irq(struct mpc_intsrc *m,
-				    struct mp_config_intsrc *mp_irq)
+				    struct mpc_intsrc *mp_irq)
 {
-	mp_irq->mp_dstapic = m->dstapic;
-	mp_irq->mp_type = m->type;
-	mp_irq->mp_irqtype = m->irqtype;
-	mp_irq->mp_irqflag = m->irqflag;
-	mp_irq->mp_srcbus = m->srcbus;
-	mp_irq->mp_srcbusirq = m->srcbusirq;
-	mp_irq->mp_dstirq = m->dstirq;
+	mp_irq->dstapic = m->dstapic;
+	mp_irq->type = m->type;
+	mp_irq->irqtype = m->irqtype;
+	mp_irq->irqflag = m->irqflag;
+	mp_irq->srcbus = m->srcbus;
+	mp_irq->srcbusirq = m->srcbusirq;
+	mp_irq->dstirq = m->dstirq;
 }
 
-static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
+static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
 					struct mpc_intsrc *m)
 {
-	m->dstapic = mp_irq->mp_dstapic;
-	m->type = mp_irq->mp_type;
-	m->irqtype = mp_irq->mp_irqtype;
-	m->irqflag = mp_irq->mp_irqflag;
-	m->srcbus = mp_irq->mp_srcbus;
-	m->srcbusirq = mp_irq->mp_srcbusirq;
-	m->dstirq = mp_irq->mp_dstirq;
+	m->dstapic = mp_irq->dstapic;
+	m->type = mp_irq->type;
+	m->irqtype = mp_irq->irqtype;
+	m->irqflag = mp_irq->irqflag;
+	m->srcbus = mp_irq->srcbus;
+	m->srcbusirq = mp_irq->srcbusirq;
+	m->dstirq = mp_irq->dstirq;
 }
 
-static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
+static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
 					struct mpc_intsrc *m)
 {
-	if (mp_irq->mp_dstapic != m->dstapic)
+	if (mp_irq->dstapic != m->dstapic)
 		return 1;
-	if (mp_irq->mp_type != m->type)
+	if (mp_irq->type != m->type)
 		return 2;
-	if (mp_irq->mp_irqtype != m->irqtype)
+	if (mp_irq->irqtype != m->irqtype)
 		return 3;
-	if (mp_irq->mp_irqflag != m->irqflag)
+	if (mp_irq->irqflag != m->irqflag)
 		return 4;
-	if (mp_irq->mp_srcbus != m->srcbus)
+	if (mp_irq->srcbus != m->srcbus)
 		return 5;
-	if (mp_irq->mp_srcbusirq != m->srcbusirq)
+	if (mp_irq->srcbusirq != m->srcbusirq)
 		return 6;
-	if (mp_irq->mp_dstirq != m->dstirq)
+	if (mp_irq->dstirq != m->dstirq)
 		return 7;
 
 	return 0;
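These four helpers are where the old compatibility layer used to live: with struct mp_config_intsrc gone, print_mp_irq_info(), assign_to_mp_irq(), assign_to_mpc_intsrc() and mp_irq_mpc_intsrc_cmp() all take struct mpc_intsrc directly, the two assign helpers become mirror-image field-for-field copies, and the compare helper keeps returning the 1-based index of the first mismatching field (0 on a full match).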
@@ -417,7 +417,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
 	intsrc.type = MP_INTSRC;
 	intsrc.irqflag = 0;	/* conforming */
 	intsrc.srcbus = 0;
-	intsrc.dstapic = mp_ioapics[0].mp_apicid;
+	intsrc.dstapic = mp_ioapics[0].apicid;
 
 	intsrc.irqtype = mp_INT;
 
@@ -570,14 +570,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
 	}
 }
 
-static struct intel_mp_floating *mpf_found;
+static struct mpf_intel *mpf_found;
 
 /*
  * Scan the memory blocks for an SMP configuration block.
 */
 static void __init __get_smp_config(unsigned int early)
 {
-	struct intel_mp_floating *mpf = mpf_found;
+	struct mpf_intel *mpf = mpf_found;
 
 	if (!mpf)
 		return;
@@ -598,9 +598,9 @@ static void __init __get_smp_config(unsigned int early)
 	}
 
 	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
-	       mpf->mpf_specification);
+	       mpf->specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
-	if (mpf->mpf_feature2 & (1 << 7)) {
+	if (mpf->feature2 & (1 << 7)) {
 		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
 		pic_mode = 1;
 	} else {
@@ -611,7 +611,7 @@ static void __init __get_smp_config(unsigned int early)
 	/*
 	 * Now see if we need to read further.
 	 */
-	if (mpf->mpf_feature1 != 0) {
+	if (mpf->feature1 != 0) {
 		if (early) {
 			/*
 			 * local APIC has default address
@@ -621,16 +621,16 @@ static void __init __get_smp_config(unsigned int early)
 		}
 
 		printk(KERN_INFO "Default MP configuration #%d\n",
-		       mpf->mpf_feature1);
-		construct_default_ISA_mptable(mpf->mpf_feature1);
+		       mpf->feature1);
+		construct_default_ISA_mptable(mpf->feature1);
 
-	} else if (mpf->mpf_physptr) {
+	} else if (mpf->physptr) {
 
 		/*
 		 * Read the physical hardware table.  Anything here will
 		 * override the defaults.
 		 */
-		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
+		if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
 #ifdef CONFIG_X86_LOCAL_APIC
 			smp_found_config = 0;
 #endif
@@ -688,19 +688,19 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 				  unsigned reserve)
 {
 	unsigned int *bp = phys_to_virt(base);
-	struct intel_mp_floating *mpf;
+	struct mpf_intel *mpf;
 
 	apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
 			bp, length);
 	BUILD_BUG_ON(sizeof(*mpf) != 16);
 
 	while (length > 0) {
-		mpf = (struct intel_mp_floating *)bp;
+		mpf = (struct mpf_intel *)bp;
 		if ((*bp == SMP_MAGIC_IDENT) &&
-		    (mpf->mpf_length == 1) &&
+		    (mpf->length == 1) &&
 		    !mpf_checksum((unsigned char *)bp, 16) &&
-		    ((mpf->mpf_specification == 1)
-		     || (mpf->mpf_specification == 4))) {
+		    ((mpf->specification == 1)
+		     || (mpf->specification == 4))) {
 #ifdef CONFIG_X86_LOCAL_APIC
 			smp_found_config = 1;
 #endif
@@ -713,7 +713,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 				return 1;
 			reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
 					BOOTMEM_DEFAULT);
-			if (mpf->mpf_physptr) {
+			if (mpf->physptr) {
 				unsigned long size = PAGE_SIZE;
 #ifdef CONFIG_X86_32
 				/*
@@ -722,14 +722,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 				 * the bottom is mapped now.
 				 * PC-9800's MPC table places on the very last
 				 * of physical memory; so that simply reserving
-				 * PAGE_SIZE from mpg->mpf_physptr yields BUG()
+				 * PAGE_SIZE from mpf->physptr yields BUG()
 				 * in reserve_bootmem.
 				 */
 				unsigned long end = max_low_pfn * PAGE_SIZE;
-				if (mpf->mpf_physptr + size > end)
-					size = end - mpf->mpf_physptr;
+				if (mpf->physptr + size > end)
+					size = end - mpf->physptr;
 #endif
-				reserve_bootmem_generic(mpf->mpf_physptr, size,
+				reserve_bootmem_generic(mpf->physptr, size,
 						BOOTMEM_DEFAULT);
 			}
 
@@ -809,15 +809,15 @@ static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
 	/* not legacy */
 
 	for (i = 0; i < mp_irq_entries; i++) {
-		if (mp_irqs[i].mp_irqtype != mp_INT)
+		if (mp_irqs[i].irqtype != mp_INT)
 			continue;
 
-		if (mp_irqs[i].mp_irqflag != 0x0f)
+		if (mp_irqs[i].irqflag != 0x0f)
 			continue;
 
-		if (mp_irqs[i].mp_srcbus != m->srcbus)
+		if (mp_irqs[i].srcbus != m->srcbus)
 			continue;
-		if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
+		if (mp_irqs[i].srcbusirq != m->srcbusirq)
 			continue;
 		if (irq_used[i]) {
 			/* already claimed */
@@ -922,10 +922,10 @@ static int  __init replace_intsrc_all(struct mpc_table *mpc,
 		if (irq_used[i])
 			continue;
 
-		if (mp_irqs[i].mp_irqtype != mp_INT)
+		if (mp_irqs[i].irqtype != mp_INT)
 			continue;
 
-		if (mp_irqs[i].mp_irqflag != 0x0f)
+		if (mp_irqs[i].irqflag != 0x0f)
 			continue;
 
 		if (nr_m_spare > 0) {
@@ -1001,7 +1001,7 @@ static int __init update_mp_table(void)
 {
 	char str[16];
 	char oem[10];
-	struct intel_mp_floating *mpf;
+	struct mpf_intel *mpf;
 	struct mpc_table *mpc, *mpc_new;
 
 	if (!enable_update_mptable)
@@ -1014,19 +1014,19 @@ static int __init update_mp_table(void)
 	/*
 	 * Now see if we need to go further.
 	 */
-	if (mpf->mpf_feature1 != 0)
+	if (mpf->feature1 != 0)
 		return 0;
 
-	if (!mpf->mpf_physptr)
+	if (!mpf->physptr)
 		return 0;
 
-	mpc = phys_to_virt(mpf->mpf_physptr);
+	mpc = phys_to_virt(mpf->physptr);
 
 	if (!smp_check_mpc(mpc, oem, str))
 		return 0;
 
 	printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
-	printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
+	printk(KERN_INFO "physptr: %x\n", mpf->physptr);
 
 	if (mpc_new_phys && mpc->length > mpc_new_length) {
 		mpc_new_phys = 0;
@@ -1047,23 +1047,23 @@ static int __init update_mp_table(void)
 		}
 		printk(KERN_INFO "use in-positon replacing\n");
 	} else {
-		mpf->mpf_physptr = mpc_new_phys;
+		mpf->physptr = mpc_new_phys;
 		mpc_new = phys_to_virt(mpc_new_phys);
 		memcpy(mpc_new, mpc, mpc->length);
 		mpc = mpc_new;
 		/* check if we can modify that */
-		if (mpc_new_phys - mpf->mpf_physptr) {
-			struct intel_mp_floating *mpf_new;
+		if (mpc_new_phys - mpf->physptr) {
+			struct mpf_intel *mpf_new;
 			/* steal 16 bytes from [0, 1k) */
 			printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
 			mpf_new = phys_to_virt(0x400 - 16);
 			memcpy(mpf_new, mpf, 16);
 			mpf = mpf_new;
-			mpf->mpf_physptr = mpc_new_phys;
+			mpf->physptr = mpc_new_phys;
 		}
-		mpf->mpf_checksum = 0;
-		mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16);
-		printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr);
+		mpf->checksum = 0;
+		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
+		printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
 	}
 
 	/*
| 
						 | 
					
 | 
				
			||||||
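The same series renames struct intel_mp_floating to struct mpf_intel and strips the mpf_ prefix from its fields. The checksum dance above (zero the field, then subtract the byte sum of the whole 16-byte structure) makes the structure's bytes sum to zero modulo 256, which is what consumers of the floating pointer verify. A hedged sketch of the renamed structure and the helper, abbreviated from how the header and mpparse.c read after the rename:

/* Abbreviated sketch of the renamed MP floating pointer structure. */
struct mpf_intel {
	char signature[4];		/* "_MP_" */
	unsigned int physptr;		/* was mpf_physptr: phys addr of MP table */
	unsigned char length;		/* was mpf_length: size in paragraphs */
	unsigned char specification;	/* was mpf_specification */
	unsigned char checksum;		/* was mpf_checksum: makes the sum 0 */
	unsigned char feature1;		/* was mpf_feature1 */
	/* feature2..feature5 elided */
};

/* Byte-wise checksum helper, as used above. */
static int mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}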
@@ -35,10 +35,10 @@
 #include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/uaccess.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
 
 static struct class *msr_class;
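This is the usual checkpatch-driven cleanup: code should include <linux/uaccess.h>, which wraps the architecture's <asm/uaccess.h>, rather than pulling in the asm header directly. A minimal sketch of the pattern the driver depends on; copy_msr_to_user is a hypothetical helper name, not a function from the diff:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>	/* preferred over <asm/uaccess.h> */

/* Hypothetical helper: copy one 64-bit MSR value (lo, hi) to user space. */
static int copy_msr_to_user(u32 __user *buf, const u32 data[2])
{
	/* copy_to_user() returns the number of bytes it could NOT copy */
	if (copy_to_user(buf, data, 2 * sizeof(u32)))
		return -EFAULT;
	return 0;
}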
@@ -61,11 +61,7 @@ static int endflag __initdata;
 
 static inline unsigned int get_nmi_count(int cpu)
 {
-#ifdef CONFIG_X86_64
-	return cpu_pda(cpu)->__nmi_count;
-#else
-	return nmi_count(cpu);
-#endif
+	return per_cpu(irq_stat, cpu).__nmi_count;
 }
 
 static inline int mce_in_progress(void)
@@ -82,12 +78,8 @@ static inline int mce_in_progress(void)
  */
 static inline unsigned int get_timer_irqs(int cpu)
 {
-#ifdef CONFIG_X86_64
-	return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
-#else
 	return per_cpu(irq_stat, cpu).apic_timer_irqs +
 		per_cpu(irq_stat, cpu).irq0_irqs;
-#endif
 }
 
 #ifdef CONFIG_SMP
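Both helpers lose their #ifdef because the core/percpu work moves x86-64's NMI and timer counters out of the PDA and into the same per-cpu irq_cpustat_t that 32-bit already used. A minimal sketch of the unified access pattern; the field list is abbreviated and nmi_count_for is a made-up name for illustration:

/* Abbreviated sketch: both 32-bit and 64-bit now keep IRQ stats here. */
typedef struct {
	unsigned int __nmi_count;	/* was cpu_pda(cpu)->__nmi_count on 64-bit */
	unsigned int apic_timer_irqs;	/* was read_pda(apic_timer_irqs) */
	unsigned int irq0_irqs;		/* was read_pda(irq0_irqs) */
	/* ... */
} irq_cpustat_t;

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

/* Hypothetical helper: one accessor now works on both word sizes. */
static inline unsigned int nmi_count_for(int cpu)
{
	return per_cpu(irq_stat, cpu).__nmi_count;
}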
@@ -66,9 +66,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(int, cpu_number);
-EXPORT_PER_CPU_SYMBOL(cpu_number);
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -111,7 +108,6 @@ void cpu_idle(void)
 				play_dead();
 
 			local_irq_disable();
-			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
 			pm_idle();
@@ -591,7 +587,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (prev->gs | next->gs)
 		loadsegment(gs, next->gs);
 
-	x86_write_percpu(current_task, next_p);
+	percpu_write(current_task, next_p);
 
 	return prev_p;
 }
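x86_write_percpu() was a 32-bit-only accessor; the core/percpu branch replaces it with the percpu_read()/percpu_write() pair, which compiles to a single segment-prefixed move on both word sizes, so __switch_to() no longer needs per-width variants. A hedged sketch of the idiom; set_this_cpu_task is a hypothetical wrapper, not code from the diff:

DEFINE_PER_CPU(struct task_struct *, current_task);

/* Hypothetical wrapper: update this CPU's current_task slot. */
static inline void set_this_cpu_task(struct task_struct *next_p)
{
	/*
	 * percpu_write() emits one %fs-/%gs-relative store, so no
	 * explicit preempt_disable()/enable() bracketing is needed.
	 */
	percpu_write(current_task, next_p);
}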
Some files were not shown because too many files have changed in this diff.