Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
KVM: ia64: Makefile fix for forcing to re-generate asm-offsets.h
KVM: Future-proof device assignment ABI
KVM: ia64: Fix halt emulation logic
KVM: Fix guest shared interrupt with in-kernel irqchip
KVM: MMU: sync root on paravirt TLB flush
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 9d8eb55..eb471c7 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -92,7 +92,7 @@
way the previous scheduler had, and has no heuristics whatsoever. There is
only one central tunable (you have to switch on CONFIG_SCHED_DEBUG):
- /proc/sys/kernel/sched_granularity_ns
+ /proc/sys/kernel/sched_min_granularity_ns
which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
"server" (i.e., good batching) workloads. It defaults to a setting suitable
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 70b7645..8116a33 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -241,19 +241,17 @@
Say Y if you are unsure.
config SMALL_STACK
- bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb"
- depends on PACK_STACK && !LOCKDEP
+ bool "Use 8kb for kernel stack instead of 16kb"
+ depends on PACK_STACK && 64BIT && !LOCKDEP
help
If you say Y here and the compiler supports the -mkernel-backchain
- option the kernel will use a smaller kernel stack size. For 31 bit
- the reduced size is 4kb instead of 8kb and for 64 bit it is 8kb
- instead of 16kb. This allows to run more thread on a system and
- reduces the pressure on the memory management for higher order
- page allocations.
+ option the kernel will use a smaller kernel stack size. The reduced
+ size is 8kb instead of 16kb. This allows to run more threads on a
+ system and reduces the pressure on the memory management for higher
+ order page allocations.
Say N if you are unsure.
-
config CHECK_STACK
bool "Detect kernel stack overflow"
help
@@ -384,7 +382,7 @@
choice
prompt "IPL method generated into head.S"
depends on IPL
- default IPL_TAPE
+ default IPL_VM
help
Select "tape" if you want to IPL the image from a Tape.
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index a7f8979..a06a47c 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -424,7 +424,7 @@
*/
int appldata_register_ops(struct appldata_ops *ops)
{
- if ((ops->size > APPLDATA_MAX_REC_SIZE) || (ops->size < 0))
+ if (ops->size > APPLDATA_MAX_REC_SIZE)
return -EINVAL;
ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h
index 1461002..c13568b 100644
--- a/arch/s390/include/asm/kvm_virtio.h
+++ b/arch/s390/include/asm/kvm_virtio.h
@@ -52,7 +52,7 @@
#ifdef __KERNEL__
/* early virtio console setup */
-#ifdef CONFIG_VIRTIO_CONSOLE
+#ifdef CONFIG_S390_GUEST
extern void s390_virtio_console_init(void);
#else
static inline void s390_virtio_console_init(void)
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 5dd5e7b..d2b4ff8 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -7,7 +7,8 @@
unsigned long asce_bits;
unsigned long asce_limit;
int noexec;
- int pgstes;
+ int has_pgste; /* The mmu context has extended page tables */
+ int alloc_pgste; /* cloned contexts will have extended page tables */
} mm_context_t;
#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 4c2fbf4..28ec870 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -20,12 +20,25 @@
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
- if (current->mm->context.pgstes) {
+ if (current->mm->context.alloc_pgste) {
+ /*
+ * alloc_pgste indicates, that any NEW context will be created
+ * with extended page tables. The old context is unchanged. The
+ * page table allocation and the page table operations will
+ * look at has_pgste to distinguish normal and extended page
+ * tables. The only way to create extended page tables is to
+ * set alloc_pgste and then create a new context (e.g. dup_mm).
+ * The page table allocation is called after init_new_context
+ * and if has_pgste is set, it will create extended page
+ * tables.
+ */
mm->context.noexec = 0;
- mm->context.pgstes = 1;
+ mm->context.has_pgste = 1;
+ mm->context.alloc_pgste = 1;
} else {
mm->context.noexec = s390_noexec;
- mm->context.pgstes = 0;
+ mm->context.has_pgste = 0;
+ mm->context.alloc_pgste = 0;
}
mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
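The split into has_pgste/alloc_pgste is easiest to follow together with the arch/s390/mm/pgtable.c hunk further down, which sets alloc_pgste only around dup_mm() so the clone gets extended page tables while the original mm stays untouched. A minimal user-space model of that hand-off (not kernel code; clone_context() stands in for init_new_context() as invoked via dup_mm()):

#include <stdio.h>

struct mm_context {
        int has_pgste;   /* this context already has extended page tables */
        int alloc_pgste; /* contexts cloned from this one will get them */
};

/* models init_new_context(): only a clone made while the parent has
 * alloc_pgste set ends up with extended page tables */
static struct mm_context clone_context(const struct mm_context *parent)
{
        struct mm_context child = { 0, 0 };

        if (parent->alloc_pgste) {
                child.has_pgste = 1;
                child.alloc_pgste = 1;
        }
        return child;
}

int main(void)
{
        struct mm_context old = { 0, 0 }, copy;

        old.alloc_pgste = 1;            /* set just before the clone */
        copy = clone_context(&old);     /* dup_mm() */
        old.alloc_pgste = 0;            /* old mm itself is unchanged */

        printf("old has_pgste=%d, copy has_pgste=%d\n",
               old.has_pgste, copy.has_pgste);
        return 0;
}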
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 1a928f8..7fc7613 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -679,7 +679,7 @@
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- if (mm->context.pgstes)
+ if (mm->context.has_pgste)
ptep_rcp_copy(ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec)
@@ -763,7 +763,7 @@
struct page *page;
unsigned int skey;
- if (!mm->context.pgstes)
+ if (!mm->context.has_pgste)
return -EINVAL;
rcp_lock(ptep);
pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -794,7 +794,7 @@
int young;
unsigned long *pgste;
- if (!vma->vm_mm->context.pgstes)
+ if (!vma->vm_mm->context.has_pgste)
return 0;
physpage = pte_val(*ptep) & PAGE_MASK;
pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -844,7 +844,7 @@
static inline void ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
- if (mm->context.pgstes) {
+ if (mm->context.has_pgste) {
rcp_lock(ptep);
__ptep_ipte(address, ptep);
ptep_rcp_copy(ptep);
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index de3fad6..c1eaf96 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -15,13 +15,8 @@
* Size of kernel stack for each process
*/
#ifndef __s390x__
-#ifndef __SMALL_STACK
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
-#else
-#define THREAD_ORDER 0
-#define ASYNC_ORDER 0
-#endif
#else /* __s390x__ */
#ifndef __SMALL_STACK
#define THREAD_ORDER 2
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9e8b1f9..b559568 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1119,9 +1119,7 @@
return rc;
}
-static ssize_t __ref rescan_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf,
+static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
size_t count)
{
int rc;
@@ -1129,12 +1127,10 @@
rc = smp_rescan_cpus();
return rc ? rc : count;
}
-static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
+static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
-static ssize_t dispatching_show(struct sys_device *dev,
- struct sysdev_attribute *attr,
- char *buf)
+static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
{
ssize_t count;
@@ -1144,9 +1140,8 @@
return count;
}
-static ssize_t dispatching_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf, size_t count)
+static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
+ size_t count)
{
int val, rc;
char delim;
@@ -1168,7 +1163,8 @@
put_online_cpus();
return rc ? rc : count;
}
-static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
+static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
+ dispatching_store);
static int __init topology_init(void)
{
@@ -1178,13 +1174,11 @@
register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
- rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
- &attr_rescan.attr);
+ rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
if (rc)
return rc;
#endif
- rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
- &attr_dispatching.attr);
+ rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
if (rc)
return rc;
for_each_present_cpu(cpu) {
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 3d98ba8..ef3635b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -169,7 +169,7 @@
unsigned long *table;
unsigned long bits;
- bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
+ bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
spin_lock(&mm->page_table_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
@@ -186,7 +186,7 @@
pgtable_page_ctor(page);
page->flags &= ~FRAG_MASK;
table = (unsigned long *) page_to_phys(page);
- if (mm->context.pgstes)
+ if (mm->context.has_pgste)
clear_table_pgstes(table);
else
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
@@ -210,7 +210,7 @@
struct page *page;
unsigned long bits;
- bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
+ bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
@@ -257,7 +257,7 @@
struct mm_struct *mm, *old_mm;
/* Do we have pgstes? if yes, we are done */
- if (tsk->mm->context.pgstes)
+ if (tsk->mm->context.has_pgste)
return 0;
/* lets check if we are allowed to replace the mm */
@@ -269,14 +269,14 @@
}
task_unlock(tsk);
- /* we copy the mm with pgstes enabled */
- tsk->mm->context.pgstes = 1;
+ /* we copy the mm and let dup_mm create the page tables with_pgstes */
+ tsk->mm->context.alloc_pgste = 1;
mm = dup_mm(tsk);
- tsk->mm->context.pgstes = 0;
+ tsk->mm->context.alloc_pgste = 0;
if (!mm)
return -ENOMEM;
- /* Now lets check again if somebody attached ptrace etc */
+ /* Now lets check again if something happened */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4a5397b..7f225a4 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -255,9 +255,11 @@
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
-#ifdef CONFIG_X86_64
unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+ if (dma_mask <= DMA_24BIT_MASK)
+ gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
gfp |= GFP_DMA32;
#endif
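A standalone sketch of the flag selection this hunk arrives at: ISA-class devices (mask <= 24 bit) now get GFP_DMA on both 32-bit and 64-bit builds, while the GFP_DMA32 promotion stays 64-bit only. The GFP_* and DMA_*_MASK values below are local placeholders, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

#define GFP_DMA         0x01u           /* placeholder values */
#define GFP_DMA32       0x02u
#define DMA_24BIT_MASK  0x0000000000ffffffULL
#define DMA_32BIT_MASK  0x00000000ffffffffULL

static unsigned int coherent_gfp_flags(uint64_t dma_mask, unsigned int gfp,
                                        int is_x86_64)
{
        if (dma_mask <= DMA_24BIT_MASK)
                gfp |= GFP_DMA;         /* new: applies on 32 and 64 bit */
        if (is_x86_64 && dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;       /* unchanged: 64 bit only */
        return gfp;
}

int main(void)
{
        printf("24-bit device -> %#x\n", coherent_gfp_flags(DMA_24BIT_MASK, 0, 1));
        printf("32-bit device -> %#x\n", coherent_gfp_flags(DMA_32BIT_MASK, 0, 1));
        printf("64-bit device -> %#x\n", coherent_gfp_flags(UINT64_MAX, 0, 1));
        return 0;
}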
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 680a065..2c7dbdb 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -15,7 +15,6 @@
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
@@ -398,16 +397,16 @@
printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
- uv_blade_info = alloc_bootmem_pages(bytes);
+ uv_blade_info = kmalloc(bytes, GFP_KERNEL);
get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
- uv_node_to_blade = alloc_bootmem_pages(bytes);
+ uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
memset(uv_node_to_blade, 255, bytes);
bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
- uv_cpu_to_blade = alloc_bootmem_pages(bytes);
+ uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
memset(uv_cpu_to_blade, 255, bytes);
blade = 0;
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index c4ce033..3c539d1 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,9 +18,21 @@
return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
}
+static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
+{
+ void *vaddr;
+
+ vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+ if (vaddr)
+ return vaddr;
+
+ return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+}
+
struct dma_mapping_ops swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
- .alloc_coherent = swiotlb_alloc_coherent,
+ .alloc_coherent = x86_swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent,
.map_single = swiotlb_map_single_phys,
.unmap_single = swiotlb_unmap_single,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b8e461d..f79a02f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -350,8 +350,10 @@
* pagetable pages as RO. So assume someone who pre-setup
* these mappings are more intelligent.
*/
- if (pte_val(*pte))
+ if (pte_val(*pte)) {
+ pages++;
continue;
+ }
if (0)
printk(" pte=%p addr=%lx pte=%016lx\n",
@@ -418,8 +420,10 @@
* not differ with respect to page frame and
* attributes.
*/
- if (page_size_mask & (1 << PG_LEVEL_2M))
+ if (page_size_mask & (1 << PG_LEVEL_2M)) {
+ pages++;
continue;
+ }
new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
}
@@ -499,8 +503,10 @@
* not differ with respect to page frame and
* attributes.
*/
- if (page_size_mask & (1 << PG_LEVEL_1G))
+ if (page_size_mask & (1 << PG_LEVEL_1G)) {
+ pages++;
continue;
+ }
prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
}
@@ -831,7 +837,7 @@
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- last_mapped_pfn = init_memory_mapping(start, start + size-1);
+ last_mapped_pfn = init_memory_mapping(start, start + size);
if (last_mapped_pfn > max_pfn_mapped)
max_pfn_mapped = last_mapped_pfn;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d4d52f5..aba77b2 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -246,11 +246,21 @@
{
unsigned long address = (unsigned long)vaddr;
unsigned int level;
- pte_t *pte = lookup_address(address, &level);
- unsigned offset = address & ~PAGE_MASK;
+ pte_t *pte;
+ unsigned offset;
+ /*
+ * if the PFN is in the linear mapped vaddr range, we can just use
+ * the (quick) virt_to_machine() p2m lookup
+ */
+ if (virt_addr_valid(vaddr))
+ return virt_to_machine(vaddr);
+
+ /* otherwise we have to do a (slower) full page-table walk */
+
+ pte = lookup_address(address, &level);
BUG_ON(pte == NULL);
-
+ offset = address & ~PAGE_MASK;
return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
@@ -410,7 +420,7 @@
xen_mc_batch();
- u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+ u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
u.val = pte_val_ma(pte);
xen_extend_mmu_update(&u);
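A rough user-space model of the fast-path/slow-path split the new comment describes: addresses inside the linear mapping translate with a direct pfn-to-mfn lookup, anything else falls back to a page-table walk. The p2m array, in_linear_map() and walk_page_table() below are stand-ins for illustration, not the Xen interfaces:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_MASK       (~((uintptr_t)(1 << PAGE_SHIFT) - 1))

static uint64_t p2m[] = { 0x111, 0x222, 0x333 };        /* pfn -> mfn */
static int in_linear_map(uintptr_t va) { return va < (uintptr_t)3 << PAGE_SHIFT; }
static uint64_t walk_page_table(uintptr_t va) { (void)va; return 0x999; }

static uint64_t virt_to_machine_addr(uintptr_t va)
{
        uintptr_t off = va & ~PAGE_MASK;
        uint64_t mfn;

        if (in_linear_map(va))                  /* quick p2m lookup */
                mfn = p2m[va >> PAGE_SHIFT];
        else                                    /* slower full walk */
                mfn = walk_page_table(va);
        return (mfn << PAGE_SHIFT) + off;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)virt_to_machine_addr(0x1234));
        printf("%#llx\n", (unsigned long long)virt_to_machine_addr(0xdead0010));
        return 0;
}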
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index aeadd00..a67b8e7c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -49,6 +49,17 @@
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
+/* Enclosure Management Control */
+#define EM_CTRL_MSG_TYPE 0x000f0000
+
+/* Enclosure Management LED Message Type */
+#define EM_MSG_LED_HBA_PORT 0x0000000f
+#define EM_MSG_LED_PMP_SLOT 0x0000ff00
+#define EM_MSG_LED_VALUE 0xffff0000
+#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
+#define EM_MSG_LED_VALUE_OFF 0xfff80000
+#define EM_MSG_LED_VALUE_ON 0x00010000
+
static int ahci_skip_host_reset;
module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
@@ -588,6 +599,9 @@
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
+ /* Promise */
+ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+
/* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -1220,18 +1234,20 @@
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
unsigned long led_message = emp->led_state;
u32 activity_led_state;
+ unsigned long flags;
- led_message &= 0xffff0000;
+ led_message &= EM_MSG_LED_VALUE;
led_message |= ap->port_no | (link->pmp << 8);
/* check to see if we've had activity. If so,
* toggle state of LED and reset timer. If not,
* turn LED to desired idle state.
*/
+ spin_lock_irqsave(ap->lock, flags);
if (emp->saved_activity != emp->activity) {
emp->saved_activity = emp->activity;
/* get the current LED state */
- activity_led_state = led_message & 0x00010000;
+ activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
if (activity_led_state)
activity_led_state = 0;
@@ -1239,17 +1255,18 @@
activity_led_state = 1;
/* clear old state */
- led_message &= 0xfff8ffff;
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
/* toggle state */
led_message |= (activity_led_state << 16);
mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
} else {
/* switch to idle */
- led_message &= 0xfff8ffff;
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
if (emp->blink_policy == BLINK_OFF)
led_message |= (1 << 16);
}
+ spin_unlock_irqrestore(ap->lock, flags);
ahci_transmit_led_message(ap, led_message, 4);
}
@@ -1294,7 +1311,7 @@
struct ahci_em_priv *emp;
/* get the slot number from the message */
- pmp = (state & 0x0000ff00) >> 8;
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
if (pmp < MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
@@ -1319,7 +1336,7 @@
message[0] |= (4 << 8);
/* ignore 0:4 of byte zero, fill in port info yourself */
- message[1] = ((state & 0xfffffff0) | ap->port_no);
+ message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
/* write message to EM_LOC */
writel(message[0], mmio + hpriv->em_loc);
@@ -1362,7 +1379,7 @@
state = simple_strtoul(buf, NULL, 0);
/* get the slot number from the message */
- pmp = (state & 0x0000ff00) >> 8;
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
if (pmp < MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
@@ -1373,7 +1390,7 @@
* activity led through em_message
*/
if (emp->blink_policy)
- state &= 0xfff8ffff;
+ state &= ~EM_MSG_LED_VALUE_ACTIVITY;
return ahci_transmit_led_message(ap, state, size);
}
@@ -1392,16 +1409,16 @@
link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
/* set the LED to OFF */
- port_led_state &= 0xfff80000;
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
port_led_state |= (ap->port_no | (link->pmp << 8));
ahci_transmit_led_message(ap, port_led_state, 4);
} else {
link->flags |= ATA_LFLAG_SW_ACTIVITY;
if (val == BLINK_OFF) {
/* set LED to ON for idle */
- port_led_state &= 0xfff80000;
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
port_led_state |= (ap->port_no | (link->pmp << 8));
- port_led_state |= 0x00010000; /* check this */
+ port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
ahci_transmit_led_message(ap, port_led_state, 4);
}
}
@@ -2612,7 +2629,7 @@
u32 em_loc = readl(mmio + HOST_EM_LOC);
u32 em_ctl = readl(mmio + HOST_EM_CTL);
- messages = (em_ctl & 0x000f0000) >> 16;
+ messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
/* we only support LED message type right now */
if ((messages & 0x01) && (ahci_em_messages == 1)) {
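The new EM_MSG_LED_* masks describe a 32-bit LED message laid out as value(31:16) | PMP slot(15:8) | HBA port(3:0), which matches the "ap->port_no | (link->pmp << 8)" and "activity_led_state << 16" updates in the hunks above. A small self-contained sketch of composing and decoding such a word with the same mask values:

#include <stdio.h>

#define EM_MSG_LED_HBA_PORT     0x0000000f
#define EM_MSG_LED_PMP_SLOT     0x0000ff00
#define EM_MSG_LED_VALUE        0xffff0000
#define EM_MSG_LED_VALUE_ON     0x00010000

static unsigned em_compose(unsigned port, unsigned pmp, unsigned value_bits)
{
        unsigned msg = 0;

        msg |= port & EM_MSG_LED_HBA_PORT;              /* bits 3:0   */
        msg |= (pmp << 8) & EM_MSG_LED_PMP_SLOT;        /* bits 15:8  */
        msg |= value_bits & EM_MSG_LED_VALUE;           /* bits 31:16 */
        return msg;
}

int main(void)
{
        unsigned msg = em_compose(2, 5, EM_MSG_LED_VALUE_ON);

        printf("msg=%#010x port=%u pmp=%u led_on=%d\n", msg,
               msg & EM_MSG_LED_HBA_PORT,
               (msg & EM_MSG_LED_PMP_SLOT) >> 8,
               !!(msg & EM_MSG_LED_VALUE_ON));
        return 0;
}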
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 75a406f..5c33767 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -1,6 +1,6 @@
/*
* ata_generic.c - Generic PATA/SATA controller driver.
- * Copyright 2005 Red Hat Inc <alan@redhat.com>, all rights reserved.
+ * Copyright 2005 Red Hat Inc, all rights reserved.
*
* Elements from ide/pci/generic.c
* Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index e9e32ed..52dc2d8 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -14,7 +14,7 @@
*
* Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
+ * Copyright (C) 2003 Red Hat Inc
*
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8cb0b36..2ff633c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4156,29 +4156,33 @@
struct ata_link *link;
struct ata_device *dev;
- /* If the controller thinks we are 40 wire, we are */
+ /* If the controller thinks we are 40 wire, we are. */
if (ap->cbl == ATA_CBL_PATA40)
return 1;
- /* If the controller thinks we are 80 wire, we are */
+
+ /* If the controller thinks we are 80 wire, we are. */
if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
return 0;
- /* If the system is known to be 40 wire short cable (eg laptop),
- then we allow 80 wire modes even if the drive isn't sure */
+
+ /* If the system is known to be 40 wire short cable (eg
+ * laptop), then we allow 80 wire modes even if the drive
+ * isn't sure.
+ */
if (ap->cbl == ATA_CBL_PATA40_SHORT)
return 0;
- /* If the controller doesn't know we scan
- - Note: We look for all 40 wire detects at this point.
- Any 80 wire detect is taken to be 80 wire cable
- because
- - In many setups only the one drive (slave if present)
- will give a valid detect
- - If you have a non detect capable drive you don't
- want it to colour the choice
- */
+ /* If the controller doesn't know, we scan.
+ *
+ * Note: We look for all 40 wire detects at this point. Any
+ * 80 wire detect is taken to be 80 wire cable because
+ * - in many setups only the one drive (slave if present) will
+ * give a valid detect
+ * - if you have a non detect capable drive you don't want it
+ * to colour the choice
+ */
ata_port_for_each_link(link, ap) {
ata_link_for_each_dev(dev, link) {
- if (!ata_is_40wire(dev))
+ if (ata_dev_enabled(dev) && !ata_is_40wire(dev))
return 0;
}
}
@@ -4553,6 +4557,7 @@
/**
* ata_qc_new_init - Request an available ATA command, and initialize it
* @dev: Device from whom we request an available command structure
+ * @tag: command tag
*
* LOCKING:
* None.
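A condensed user-space model of the detection policy the reworked comment describes, modeled here as cable_is_40wire(); the closing return after the scan loop is not part of the hunk and is assumed to report 40-wire when no enabled device vetoes it:

#include <stdio.h>

enum cable { CBL_PATA40, CBL_PATA80, CBL_SATA, CBL_PATA40_SHORT, CBL_UNKNOWN };

struct dev { int enabled; int detects_40wire; };

/* returns 1 if the port should be treated as 40-wire limited */
static int cable_is_40wire(enum cable cbl, const struct dev *devs, int ndevs)
{
        int i;

        if (cbl == CBL_PATA40)                  /* controller says 40 wire */
                return 1;
        if (cbl == CBL_PATA80 || cbl == CBL_SATA)
                return 0;                       /* controller says 80 wire */
        if (cbl == CBL_PATA40_SHORT)            /* short cable: allow 80-wire modes */
                return 0;

        /* controller does not know: only enabled devices get a vote,
         * and any 80-wire detect wins */
        for (i = 0; i < ndevs; i++)
                if (devs[i].enabled && !devs[i].detects_40wire)
                        return 0;
        return 1;                               /* assumed closing return */
}

int main(void)
{
        struct dev devs[] = { { 1, 1 }, { 0, 0 } };     /* disabled slave is ignored */

        printf("%d\n", cable_is_40wire(CBL_UNKNOWN, devs, 2));
        return 0;
}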
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 5d687d7..8077bdf 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -603,6 +603,9 @@
ata_link_for_each_dev(dev, link) {
int devno = dev->devno;
+ if (!ata_dev_enabled(dev))
+ continue;
+
ehc->saved_xfer_mode[devno] = dev->xfer_mode;
if (ata_ncq_enabled(dev))
ehc->saved_ncq_enabled |= 1 << devno;
@@ -1161,6 +1164,7 @@
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &link->eh_context;
unsigned long flags;
ata_dev_disable(dev);
@@ -1174,9 +1178,11 @@
ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
}
- /* clear per-dev EH actions */
+ /* clear per-dev EH info */
ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
+ ehc->saved_xfer_mode[dev->devno] = 0;
+ ehc->saved_ncq_enabled &= ~(1 << dev->devno);
spin_unlock_irqrestore(ap->lock, flags);
}
@@ -2787,6 +2793,9 @@
/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
ata_link_for_each_dev(dev, link) {
+ if (!ata_dev_enabled(dev))
+ continue;
+
if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
struct ata_ering_entry *ent;
@@ -2808,6 +2817,9 @@
u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
+ if (!ata_dev_enabled(dev))
+ continue;
+
if (dev->xfer_mode != saved_xfer_mode ||
ata_ncq_enabled(dev) != saved_ncq)
dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index eb919c1..e2e332d 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -1,7 +1,7 @@
/*
* ACPI PATA driver
*
- * (c) 2007 Red Hat <alan@redhat.com>
+ * (c) 2007 Red Hat
*/
#include <linux/kernel.h>
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 5ca70fa..73c466e 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -1,7 +1,6 @@
/*
* pata_ali.c - ALI 15x3 PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* based in part upon
* linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 57dd00f..0ec9c7d 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -1,7 +1,6 @@
/*
* pata_amd.c - AMD PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* Based on pata-sil680. Errata information is taken from data sheets
* and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 0f513bc..6b3092c 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -1,7 +1,7 @@
/*
* pata_artop.c - ARTOP ATA controller driver
*
- * (C) 2006 Red Hat <alan@redhat.com>
+ * (C) 2006 Red Hat
* (C) 2007 Bartlomiej Zolnierkiewicz
*
* Based in part on drivers/ide/pci/aec62xx.c
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index e8a0d99..0e2cde8 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -1,7 +1,6 @@
/*
* pata_atiixp.c - ATI PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* Based on
*
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 2de30b9..34a3942 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -1,7 +1,6 @@
/*
* pata_cmd640.c - CMD640 PCI PATA for new ATA layer
* (C) 2007 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* Based upon
* linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index ddd09b7..3167d8f 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -1,7 +1,7 @@
/*
* pata_cmd64x.c - CMD64x PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Based upon
* linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 0c4b271..bba4533 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -1,7 +1,6 @@
/*
* pata-cs5530.c - CS5530 PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* based upon cs5530.c by Mark Lord.
*
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index f1b6556..1b2d4a0 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -1,7 +1,7 @@
/*
* pata-cs5535.c - CS5535 PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
- * Alan Cox <alan@redhat.com>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* based upon cs5535.c from AMD <Jens.Altmann@amd.com> as cleaned up and
* made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 2ff6260..d546425 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -1,7 +1,7 @@
/*
* pata_cypress.c - Cypress PATA for new ATA layer
* (C) 2006 Red Hat Inc
- * Alan Cox <alan@redhat.com>
+ * Alan Cox
*
* Based heavily on
* linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 9fba829..ac6392e 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -1,7 +1,7 @@
/*
* pata_efar.c - EFAR PIIX clone controller driver
*
- * (C) 2005 Red Hat <alan@redhat.com>
+ * (C) 2005 Red Hat
*
* Some parts based on ata_piix.c by Jeff Garzik and others.
*
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 6a111ba..15cdb91 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -1,7 +1,7 @@
/*
* pata-isapnp.c - ISA PnP PATA controller driver.
- * Copyright 2005/2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
+ * Copyright 2005/2006 Red Hat Inc, all rights reserved.
*
* Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru>
*/
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 0221c9a..860ede5 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -1,7 +1,7 @@
/*
* pata_it821x.c - IT821x PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
* (C) 2007 Bartlomiej Zolnierkiewicz
*
* based upon
@@ -10,7 +10,7 @@
*
* linux/drivers/ide/pci/it821x.c Version 0.09 December 2004
*
- * Copyright (C) 2004 Red Hat <alan@redhat.com>
+ * Copyright (C) 2004 Red Hat
*
* May be copied or modified under the terms of the GNU General Public License
* Based in part on the ITE vendor provided SCSI driver.
@@ -557,9 +557,8 @@
if (strstr(model_num, "Integrated Technology Express")) {
/* Set feature bits the firmware neglects */
id[49] |= 0x0300; /* LBA, DMA */
- id[82] |= 0x0400; /* LBA48 */
id[83] &= 0x7FFF;
- id[83] |= 0x4000; /* Word 83 is valid */
+ id[83] |= 0x4400; /* Word 83 is valid and LBA48 */
id[86] |= 0x0400; /* LBA48 on */
id[ATA_ID_MAJOR_VER] |= 0x1F;
}
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 73b7596..38cf1ab 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -4,7 +4,7 @@
* driven by AHCI in the usual configuration although
* this driver can handle other setups if we need it.
*
- * (c) 2006 Red Hat <alan@redhat.com>
+ * (c) 2006 Red Hat
*/
#include <linux/kernel.h>
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index bc037ff..930c220 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -1,6 +1,6 @@
/*
* pata-legacy.c - Legacy port PATA/SATA controller driver.
- * Copyright 2005/2006 Red Hat <alan@redhat.com>, all rights reserved.
+ * Copyright 2005/2006 Red Hat, all rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 0d87eec..76e399b 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -5,7 +5,7 @@
* isn't making full use of the device functionality but it is
* easy to get working.
*
- * (c) 2006 Red Hat <alan@redhat.com>
+ * (c) 2006 Red Hat
*/
#include <linux/kernel.h>
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 7d7e3fd..7c8faa48 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -1,7 +1,7 @@
/*
* pata_mpiix.c - Intel MPIIX PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
- * Alan Cox <alan@redhat.com>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* The MPIIX is different enough to the PIIX4 and friends that we give it
* a separate driver. The old ide/pci code handles this by just not tuning
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index d9719c8..9dc05e1 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -1,7 +1,7 @@
/*
* pata_netcell.c - Netcell PATA driver
*
- * (c) 2006 Red Hat <alan@redhat.com>
+ * (c) 2006 Red Hat
*/
#include <linux/kernel.h>
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 565e67c..4e466ea 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -1,7 +1,6 @@
/*
* pata_ninja32.c - Ninja32 PATA for new ATA layer
* (C) 2007 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* Note: The controller like many controllers has shared timings for
* PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
@@ -45,7 +44,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_ninja32"
-#define DRV_VERSION "0.0.1"
+#define DRV_VERSION "0.1.1"
/**
@@ -89,6 +88,17 @@
.set_piomode = ninja32_set_piomode,
};
+static void ninja32_program(void __iomem *base)
+{
+ iowrite8(0x05, base + 0x01); /* Enable interrupt lines */
+ iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */
+ iowrite8(0x01, base + 0x03); /* Unknown */
+ iowrite8(0x20, base + 0x04); /* WAIT0 */
+ iowrite8(0x8f, base + 0x05); /* Unknown */
+ iowrite8(0xa4, base + 0x1c); /* Unknown */
+ iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */
+}
+
static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ata_host *host;
@@ -134,18 +144,28 @@
ap->ioaddr.bmdma_addr = base;
ata_sff_std_ports(&ap->ioaddr);
- iowrite8(0x05, base + 0x01); /* Enable interrupt lines */
- iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */
- iowrite8(0x01, base + 0x03); /* Unknown */
- iowrite8(0x20, base + 0x04); /* WAIT0 */
- iowrite8(0x8f, base + 0x05); /* Unknown */
- iowrite8(0xa4, base + 0x1c); /* Unknown */
- iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */
+ ninja32_program(base);
/* FIXME: Should we disable them at remove ? */
return ata_host_activate(host, dev->irq, ata_sff_interrupt,
IRQF_SHARED, &ninja32_sht);
}
+#ifdef CONFIG_PM
+
+static int ninja32_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+ ninja32_program(host->iomap[0]);
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
static const struct pci_device_id ninja32[] = {
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
@@ -156,7 +176,11 @@
.name = DRV_NAME,
.id_table = ninja32,
.probe = ninja32_init_one,
- .remove = ata_pci_remove_one
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ninja32_reinit_one,
+#endif
};
static int __init ninja32_init(void)
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index be756b7..40d411c 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -1,7 +1,6 @@
/*
* pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer
* (C) 2006 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index e0aa7ea..89bf5f86 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -1,7 +1,7 @@
/*
* pata_ns87415.c - NS87415 (non PARISC) PATA
*
- * (C) 2005 Red Hat <alan@redhat.com>
+ * (C) 2005 Red Hat <alan@lxorguk.ukuu.org.uk>
*
* This is a fairly generic MWDMA controller. It has some limitations
* as it requires timing reloads on PIO/DMA transitions but it is otherwise
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index df64f24..c0dbc46 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -1,7 +1,7 @@
/*
* pata_oldpiix.c - Intel PATA/SATA controllers
*
- * (C) 2005 Red Hat <alan@redhat.com>
+ * (C) 2005 Red Hat
*
* Some parts based on ata_piix.c by Jeff Garzik and others.
*
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index fb2cf66..e4fa4d5 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -1,7 +1,6 @@
/*
* pata_opti.c - ATI PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* Based on
* linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 4cd7444..93bb6e9 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -1,7 +1,6 @@
/*
* pata_optidma.c - Opti DMA PATA for new ATA layer
* (C) 2006 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* The Opti DMA controllers are related to the older PIO PCI controllers
* and indeed the VLB ones. The main differences are that the timing
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 02b596b..271cb64 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -1,6 +1,6 @@
/*
* pata_pcmcia.c - PCMCIA PATA controller driver.
- * Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
+ * Copyright 2005-2006 Red Hat Inc, all rights reserved.
* PCMCIA ident update Copyright 2006 Marcin Juszkiewicz
* <openembedded@hrw.one.pl>
*
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index d267306..799a6a0 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -1,7 +1,7 @@
/*
* pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
* (C) 2007 Bartlomiej Zolnierkiewicz
*
* Based in part on linux/drivers/ide/pci/pdc202xx_old.c
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 8f65ad6..77e4e3b 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -5,7 +5,7 @@
*
* Based on pata_pcmcia:
*
- * Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
+ * Copyright 2005-2006 Red Hat Inc, all rights reserved.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 63b7a1c..3080f37 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -1,6 +1,6 @@
/*
* pata_qdi.c - QDI VLB ATA controllers
- * (C) 2006 Red Hat <alan@redhat.com>
+ * (C) 2006 Red Hat
*
* This driver mostly exists as a proof of concept for non PCI devices under
* libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 1c0d9fa..0b0aa45 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -1,7 +1,7 @@
/*
* pata_radisys.c - Intel PATA/SATA controllers
*
- * (C) 2006 Red Hat <alan@redhat.com>
+ * (C) 2006 Red Hat <alan@lxorguk.ukuu.org.uk>
*
* Some parts based on ata_piix.c by Jeff Garzik and others.
*
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 0278fd2..9a4bdca 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -1,5 +1,5 @@
/*
- * New ATA layer SC1200 driver Alan Cox <alan@redhat.com>
+ * New ATA layer SC1200 driver Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* TODO: Mode selection filtering
* TODO: Can't enable second channel until ATA core has serialize
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 16673d1..cf3707e 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -8,7 +8,7 @@
* Copyright 2003-2005 Jeff Garzik
* Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
+ * Copyright (C) 2003 Red Hat Inc
*
* and drivers/ata/ahci.c:
* Copyright 2004-2005 Red Hat, Inc.
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index ffd26d0..72e41c9 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -1,7 +1,6 @@
/*
* pata_serverworks.c - Serverworks PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* based upon
*
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index a598bb3..83580a5 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -1,7 +1,6 @@
/*
* pata_sil680.c - SIL680 PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* based upon
*
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 26345d7..d342366 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -1,7 +1,7 @@
/*
* pata_sis.c - SiS ATA driver
*
- * (C) 2005 Red Hat <alan@redhat.com>
+ * (C) 2005 Red Hat
* (C) 2007 Bartlomiej Zolnierkiewicz
*
* Based upon linux/drivers/ide/pci/sis5513.c
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 69877bd..1b0e7b6 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -1,7 +1,6 @@
/*
* pata_sl82c105.c - SL82C105 PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* Based in part on linux/drivers/ide/pci/sl82c105.c
* SL82C105/Winbond 553 IDE driver
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index b181261..ef95975 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -1,7 +1,7 @@
/*
* pata_triflex.c - Compaq PATA for new ATA layer
* (C) 2005 Red Hat Inc
- * Alan Cox <alan@redhat.com>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* based upon
*
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 8fdb2ce..681169c 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -1,7 +1,6 @@
/*
* pata_via.c - VIA PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
- * Alan Cox <alan@redhat.com>
*
* Documentation
* Most chipset documentation available under NDA only
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index a7606b0..319e164 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -1,6 +1,6 @@
/*
* pata_winbond.c - Winbond VLB ATA controllers
- * (C) 2006 Red Hat <alan@redhat.com>
+ * (C) 2006 Red Hat
*
* Support for the Winbond 83759A when operating in advanced mode.
* Multichip mode is not currently supported.
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 4621807..ccee930 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1329,6 +1329,11 @@
}
}
+ /* Set max read request size to 4096. This slightly increases
+ * write throughput for pci-e variants.
+ */
+ pcie_set_readrq(pdev, 4096);
+
sil24_init_controller(host);
pci_set_master(pdev);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 023803d..ae18baf 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -76,7 +76,7 @@
static void
tapeblock_end_request(struct request *req, int error)
{
- if (__blk_end_request(req, error, blk_rq_bytes(req)))
+ if (blk_end_request(req, error, blk_rq_bytes(req)))
BUG();
}
@@ -166,7 +166,7 @@
nr_queued++;
spin_unlock(get_ccwdev_lock(device->cdev));
- spin_lock(&device->blk_data.request_queue_lock);
+ spin_lock_irq(&device->blk_data.request_queue_lock);
while (
!blk_queue_plugged(queue) &&
elv_next_request(queue) &&
@@ -176,7 +176,9 @@
if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
blkdev_dequeue_request(req);
+ spin_unlock_irq(&device->blk_data.request_queue_lock);
tapeblock_end_request(req, -EIO);
+ spin_lock_irq(&device->blk_data.request_queue_lock);
continue;
}
blkdev_dequeue_request(req);
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index d7073db..f9bb51f 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -1200,7 +1200,7 @@
{
int rc;
- spin_lock(get_ccwdev_lock(device->cdev));
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(6, "TAPE:nodev\n");
rc = -ENODEV;
@@ -1218,7 +1218,7 @@
tape_state_set(device, TS_IN_USE);
rc = 0;
}
- spin_unlock(get_ccwdev_lock(device->cdev));
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
@@ -1228,11 +1228,11 @@
int
tape_release(struct tape_device *device)
{
- spin_lock(get_ccwdev_lock(device->cdev));
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
if (device->tape_state == TS_IN_USE)
tape_state_set(device, TS_UNUSED);
module_put(device->discipline->owner);
- spin_unlock(get_ccwdev_lock(device->cdev));
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
return 0;
}
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index b539082..f055903 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -20,6 +20,7 @@
#define MAX_DEBUGFS_QUEUES 32
static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
static DEFINE_MUTEX(debugfs_mutex);
+#define QDIO_DEBUGFS_NAME_LEN 40
void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
{
@@ -152,17 +153,6 @@
filp->f_path.dentry->d_inode->i_private);
}
-static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
-{
- memset(name, 0, sizeof(name));
- sprintf(name, "%s", dev_name(&cdev->dev));
- if (q->is_input_q)
- sprintf(name + strlen(name), "_input");
- else
- sprintf(name + strlen(name), "_output");
- sprintf(name + strlen(name), "_%d", q->nr);
-}
-
static void remove_debugfs_entry(struct qdio_q *q)
{
int i;
@@ -189,14 +179,17 @@
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
int i = 0;
- char name[40];
+ char name[QDIO_DEBUGFS_NAME_LEN];
while (debugfs_queues[i] != NULL) {
i++;
if (i >= MAX_DEBUGFS_QUEUES)
return;
}
- get_queue_name(q, cdev, name);
+ snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
+ dev_name(&cdev->dev),
+ q->is_input_q ? "input" : "output",
+ q->nr);
debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
debugfs_root, q, &debugfs_fops);
}
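Besides being shorter, the snprintf() form also sidesteps an oddity in the removed helper: memset(name, 0, sizeof(name)) on a char *name parameter only clears sizeof(char *) bytes, not the caller's 40-byte buffer. A small illustration of that pitfall and of the name format the replacement produces (the "0.0.1234" bus id is only an example device name):

#include <stdio.h>
#include <string.h>

#define QDIO_DEBUGFS_NAME_LEN 40

static void show_sizeof(char *name)
{
        /* inside the callee, "name" is only a pointer */
        printf("sizeof(name) here = %zu, not %d\n",
               sizeof(name), QDIO_DEBUGFS_NAME_LEN);
}

int main(void)
{
        char name[QDIO_DEBUGFS_NAME_LEN];

        show_sizeof(name);

        /* the replacement builds the whole name in one bounded call */
        snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
                 "0.0.1234", 1 ? "input" : "output", 0);
        printf("%s\n", name);   /* 0.0.1234_input_0 */
        return 0;
}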
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index a50682d..7c86591 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1083,7 +1083,6 @@
case -EIO:
sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1, setup, dbf_text);
- qdio_int_error(cdev);
return;
case -ETIMEDOUT:
sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 9ce1ab6..1e3b934 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -774,7 +774,7 @@
poll.nr_ports = 1;
poll.timeout = 0;
- poll.ports = &evtchn;
+ set_xen_guest_handle(poll.ports, &evtchn);
if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
BUG();
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index d0e87cb..9b91617 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -39,8 +39,6 @@
BUG_ON(!irqs_disabled());
- load_cr3(swapper_pg_dir);
-
err = device_power_down(PMSG_SUSPEND);
if (err) {
printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
index 797cb4e..a240b2c 100644
--- a/drivers/xen/xencomm.c
+++ b/drivers/xen/xencomm.c
@@ -23,13 +23,7 @@
#include <asm/page.h>
#include <xen/xencomm.h>
#include <xen/interface/xen.h>
-#ifdef __ia64__
-#include <asm/xen/xencomm.h> /* for is_kern_addr() */
-#endif
-
-#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-#include <xen/platform-compat.h>
-#endif
+#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */
static int xencomm_init(struct xencomm_desc *desc,
void *buffer, unsigned long bytes)
@@ -157,20 +151,11 @@
return 0;
}
-/* check if memory address is within VMALLOC region */
-static int is_phys_contiguous(unsigned long addr)
-{
- if (!is_kernel_addr(addr))
- return 0;
-
- return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
-}
-
static struct xencomm_handle *xencomm_create_inline(void *ptr)
{
unsigned long paddr;
- BUG_ON(!is_phys_contiguous((unsigned long)ptr));
+ BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
paddr = (unsigned long)xencomm_pa(ptr);
BUG_ON(paddr & XENCOMM_INLINE_FLAG);
@@ -202,7 +187,7 @@
int rc;
struct xencomm_desc *desc;
- if (is_phys_contiguous((unsigned long)ptr))
+ if (xencomm_is_phys_contiguous((unsigned long)ptr))
return xencomm_create_inline(ptr);
rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
@@ -219,7 +204,7 @@
int rc;
struct xencomm_desc *desc = NULL;
- if (is_phys_contiguous((unsigned long)ptr))
+ if (xencomm_is_phys_contiguous((unsigned long)ptr))
return xencomm_create_inline(ptr);
rc = xencomm_create_mini(ptr, bytes, xc_desc,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8478f334..b483f39 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -936,7 +936,6 @@
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
void (*yield_task) (struct rq *rq);
- int (*select_task_rq)(struct task_struct *p, int sync);
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
@@ -944,6 +943,8 @@
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
+ int (*select_task_rq)(struct task_struct *p, int sync);
+
unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
struct rq *busiest, unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
@@ -955,16 +956,17 @@
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
-#endif
- void (*set_curr_task) (struct rq *rq);
- void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
- void (*task_new) (struct rq *rq, struct task_struct *p);
void (*set_cpus_allowed)(struct task_struct *p,
const cpumask_t *newmask);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
+#endif
+
+ void (*set_curr_task) (struct rq *rq);
+ void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
+ void (*task_new) (struct rq *rq, struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index fac014a..4d161c70 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -220,7 +220,7 @@
}
}
-void register_default_affinity_proc(void)
+static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
proc_create("irq/default_smp_affinity", 0600, NULL,
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index dbda475..06e15711 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2169,12 +2169,11 @@
/*
* Hardirqs will be enabled:
*/
-void trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long ip)
{
struct task_struct *curr = current;
- unsigned long ip;
- time_hardirqs_on(CALLER_ADDR0, a0);
+ time_hardirqs_on(CALLER_ADDR0, ip);
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
@@ -2188,7 +2187,6 @@
}
/* we'll do an OFF -> ON transition: */
curr->hardirqs_enabled = 1;
- ip = (unsigned long) __builtin_return_address(0);
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
@@ -2224,11 +2222,11 @@
/*
* Hardirqs were disabled:
*/
-void trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long ip)
{
struct task_struct *curr = current;
- time_hardirqs_off(CALLER_ADDR0, a0);
+ time_hardirqs_off(CALLER_ADDR0, ip);
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
@@ -2241,7 +2239,7 @@
* We have done an ON -> OFF transition:
*/
curr->hardirqs_enabled = 0;
- curr->hardirq_disable_ip = _RET_IP_;
+ curr->hardirq_disable_ip = ip;
curr->hardirq_disable_event = ++curr->irq_events;
debug_atomic_inc(&hardirqs_off_events);
} else
@@ -3417,9 +3415,10 @@
}
printk(" ignoring it.\n");
unlock = 0;
+ } else {
+ if (count != 10)
+ printk(KERN_CONT " locked it.\n");
}
- if (count != 10)
- printk(" locked it.\n");
do_each_thread(g, p) {
/*
diff --git a/kernel/printk.c b/kernel/printk.c
index 6341af7..f492f15 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -233,45 +233,6 @@
#endif
/*
- * Return the number of unread characters in the log buffer.
- */
-static int log_buf_get_len(void)
-{
- return logged_chars;
-}
-
-/*
- * Copy a range of characters from the log buffer.
- */
-int log_buf_copy(char *dest, int idx, int len)
-{
- int ret, max;
- bool took_lock = false;
-
- if (!oops_in_progress) {
- spin_lock_irq(&logbuf_lock);
- took_lock = true;
- }
-
- max = log_buf_get_len();
- if (idx < 0 || idx >= max) {
- ret = -1;
- } else {
- if (len > max)
- len = max;
- ret = len;
- idx += (log_end - max);
- while (len-- > 0)
- dest[len] = LOG_BUF(idx + len);
- }
-
- if (took_lock)
- spin_unlock_irq(&logbuf_lock);
-
- return ret;
-}
-
-/*
* Commands to do_syslog:
*
* 0 -- Close the log. Currently a NOP.
diff --git a/kernel/resource.c b/kernel/resource.c
index 4089d12..7fec0e4 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -571,7 +571,7 @@
}
-void reserve_region_with_split(struct resource *root,
+void __init reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name)
{
diff --git a/kernel/sched.c b/kernel/sched.c
index 6625c3c..e8819bc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -386,7 +386,6 @@
u64 exec_clock;
u64 min_vruntime;
- u64 pair_start;
struct rb_root tasks_timeline;
struct rb_node *rb_leftmost;
@@ -3344,7 +3343,7 @@
} else
this_load_per_task = cpu_avg_load_per_task(this_cpu);
- if (max_load - this_load + 2*busiest_load_per_task >=
+ if (max_load - this_load + busiest_load_per_task >=
busiest_load_per_task * imbn) {
*imbalance = busiest_load_per_task;
return busiest;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9573c33..ce514af 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -143,6 +143,49 @@
return se->parent;
}
+/* return depth at which a sched entity is present in the hierarchy */
+static inline int depth_se(struct sched_entity *se)
+{
+ int depth = 0;
+
+ for_each_sched_entity(se)
+ depth++;
+
+ return depth;
+}
+
+static void
+find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+{
+ int se_depth, pse_depth;
+
+ /*
+ * preemption test can be made between sibling entities who are in the
+ * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
+ * both tasks until we find their ancestors who are siblings of common
+ * parent.
+ */
+
+ /* First walk up until both entities are at same depth */
+ se_depth = depth_se(*se);
+ pse_depth = depth_se(*pse);
+
+ while (se_depth > pse_depth) {
+ se_depth--;
+ *se = parent_entity(*se);
+ }
+
+ while (pse_depth > se_depth) {
+ pse_depth--;
+ *pse = parent_entity(*pse);
+ }
+
+ while (!is_same_group(*se, *pse)) {
+ *se = parent_entity(*se);
+ *pse = parent_entity(*pse);
+ }
+}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -193,6 +236,11 @@
return NULL;
}
+static inline void
+find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+{
+}
+
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -223,6 +271,27 @@
return se->vruntime - cfs_rq->min_vruntime;
}
+static void update_min_vruntime(struct cfs_rq *cfs_rq)
+{
+ u64 vruntime = cfs_rq->min_vruntime;
+
+ if (cfs_rq->curr)
+ vruntime = cfs_rq->curr->vruntime;
+
+ if (cfs_rq->rb_leftmost) {
+ struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
+ struct sched_entity,
+ run_node);
+
+ if (vruntime == cfs_rq->min_vruntime)
+ vruntime = se->vruntime;
+ else
+ vruntime = min_vruntime(vruntime, se->vruntime);
+ }
+
+ cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+}
+
/*
* Enqueue an entity into the rb-tree:
*/
@@ -256,15 +325,8 @@
* Maintain a cache of leftmost tree entries (it is frequently
* used):
*/
- if (leftmost) {
+ if (leftmost)
cfs_rq->rb_leftmost = &se->run_node;
- /*
- * maintain cfs_rq->min_vruntime to be a monotonic increasing
- * value tracking the leftmost vruntime in the tree.
- */
- cfs_rq->min_vruntime =
- max_vruntime(cfs_rq->min_vruntime, se->vruntime);
- }
rb_link_node(&se->run_node, parent, link);
rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
@@ -274,18 +336,9 @@
{
if (cfs_rq->rb_leftmost == &se->run_node) {
struct rb_node *next_node;
- struct sched_entity *next;
next_node = rb_next(&se->run_node);
cfs_rq->rb_leftmost = next_node;
-
- if (next_node) {
- next = rb_entry(next_node,
- struct sched_entity, run_node);
- cfs_rq->min_vruntime =
- max_vruntime(cfs_rq->min_vruntime,
- next->vruntime);
- }
}
if (cfs_rq->next == se)
@@ -424,6 +477,7 @@
schedstat_add(cfs_rq, exec_clock, delta_exec);
delta_exec_weighted = calc_delta_fair(delta_exec, curr);
curr->vruntime += delta_exec_weighted;
+ update_min_vruntime(cfs_rq);
}
static void update_curr(struct cfs_rq *cfs_rq)
@@ -613,13 +667,7 @@
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
- u64 vruntime;
-
- if (first_fair(cfs_rq)) {
- vruntime = min_vruntime(cfs_rq->min_vruntime,
- __pick_next_entity(cfs_rq)->vruntime);
- } else
- vruntime = cfs_rq->min_vruntime;
+ u64 vruntime = cfs_rq->min_vruntime;
/*
* The 'current' period is already promised to the current tasks,
@@ -696,6 +744,7 @@
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
account_entity_dequeue(cfs_rq, se);
+ update_min_vruntime(cfs_rq);
}
/*
@@ -742,16 +791,14 @@
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
static struct sched_entity *
pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- struct rq *rq = rq_of(cfs_rq);
- u64 pair_slice = rq->clock - cfs_rq->pair_start;
-
- if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
- cfs_rq->pair_start = rq->clock;
+ if (!cfs_rq->next || wakeup_preempt_entity(cfs_rq->next, se) == 1)
return se;
- }
return cfs_rq->next;
}
@@ -1122,10 +1169,9 @@
if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
return 0;
- if (!sync && sched_feat(SYNC_WAKEUPS) &&
- curr->se.avg_overlap < sysctl_sched_migration_cost &&
- p->se.avg_overlap < sysctl_sched_migration_cost)
- sync = 1;
+ if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
+ p->se.avg_overlap > sysctl_sched_migration_cost))
+ sync = 0;
/*
* If sync wakeup then subtract the (maximum possible)
@@ -1244,13 +1290,42 @@
* More easily preempt - nice tasks, while not making it harder for
* + nice tasks.
*/
- if (sched_feat(ASYM_GRAN))
- gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load);
+ if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
+ gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
return gran;
}
/*
+ * Should 'se' preempt 'curr'.
+ *
+ * |s1
+ * |s2
+ * |s3
+ * g
+ * |<--->|c
+ *
+ * w(c, s1) = -1
+ * w(c, s2) = 0
+ * w(c, s3) = 1
+ *
+ */
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+ s64 gran, vdiff = curr->vruntime - se->vruntime;
+
+ if (vdiff <= 0)
+ return -1;
+
+ gran = wakeup_gran(curr);
+ if (vdiff > gran)
+ return 1;
+
+ return 0;
+}
+
+/*
* Preempt the current task with a newly woken task if needed:
*/
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
@@ -1258,7 +1333,6 @@
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *se = &curr->se, *pse = &p->se;
- s64 delta_exec;
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
@@ -1296,9 +1370,19 @@
return;
}
- delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime;
- if (delta_exec > wakeup_gran(pse))
- resched_task(curr);
+ find_matching_se(&se, &pse);
+
+ while (se) {
+ BUG_ON(!pse);
+
+ if (wakeup_preempt_entity(se, pse) == 1) {
+ resched_task(curr);
+ break;
+ }
+
+ se = parent_entity(se);
+ pse = parent_entity(pse);
+ }
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1594,9 +1678,6 @@
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
-#ifdef CONFIG_SMP
- .select_task_rq = select_task_rq_fair,
-#endif /* CONFIG_SMP */
.check_preempt_curr = check_preempt_wakeup,
@@ -1604,6 +1685,8 @@
.put_prev_task = put_prev_task_fair,
#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_fair,
+
.load_balance = load_balance_fair,
.move_one_task = move_one_task_fair,
#endif
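Taken together, the new helpers implement a hierarchy-aware preemption test: find_matching_se() first walks both entities up to siblings under a common parent, then wakeup_preempt_entity() compares their vruntimes against the wakeup granularity. A compact user-space model of that walk and comparison (the parent pointers, vruntimes and granularity below are invented for illustration):

#include <stdio.h>

struct se {
        struct se *parent;
        long long vruntime;
};

static int depth_se(struct se *e)
{
        int d = 0;

        for (; e; e = e->parent)
                d++;
        return d;
}

static void find_matching_se(struct se **a, struct se **b)
{
        int da = depth_se(*a), db = depth_se(*b);

        while (da > db) { *a = (*a)->parent; da--; }
        while (db > da) { *b = (*b)->parent; db--; }
        while ((*a)->parent != (*b)->parent) {  /* walk to a common parent */
                *a = (*a)->parent;
                *b = (*b)->parent;
        }
}

static int wakeup_preempt_entity(struct se *curr, struct se *e, long long gran)
{
        long long vdiff = curr->vruntime - e->vruntime;

        if (vdiff <= 0)
                return -1;      /* curr is not ahead: no preemption */
        if (vdiff > gran)
                return 1;       /* ahead by more than the granularity: preempt */
        return 0;
}

int main(void)
{
        /* two-level hierarchy: groupA/taskA is running, groupB/taskB wakes up */
        struct se groupA = { NULL, 1000 }, groupB = { NULL, 300 };
        struct se taskA = { &groupA, 500 }, taskB = { &groupB, 480 };
        struct se *curr = &taskA, *woken = &taskB;

        find_matching_se(&curr, &woken);        /* now compares groupA vs groupB */
        printf("preempt decision: %d\n",
               wakeup_preempt_entity(curr, woken, 200));        /* 1000-300 > 200 -> 1 */
        return 0;
}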
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index dec4cca..8a21a2e 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -105,9 +105,6 @@
/* dequeue is not valid, we print a debug message there: */
.dequeue_task = dequeue_task_idle,
-#ifdef CONFIG_SMP
- .select_task_rq = select_task_rq_idle,
-#endif /* CONFIG_SMP */
.check_preempt_curr = check_preempt_curr_idle,
@@ -115,6 +112,8 @@
.put_prev_task = put_prev_task_idle,
#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_idle,
+
.load_balance = load_balance_idle,
.move_one_task = move_one_task_idle,
#endif
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b446dc8..d9ba9d5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1504,9 +1504,6 @@
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
-#ifdef CONFIG_SMP
- .select_task_rq = select_task_rq_rt,
-#endif /* CONFIG_SMP */
.check_preempt_curr = check_preempt_curr_rt,
@@ -1514,6 +1511,8 @@
.put_prev_task = put_prev_task_rt,
#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_rt,
+
.load_balance = load_balance_rt,
.move_one_task = move_one_task_rt,
.set_cpus_allowed = set_cpus_allowed_rt,
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index f8eebd48..78330c3 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -497,8 +497,10 @@
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)*hwdev->dma_mask,
(unsigned long long)dev_addr);
- panic("swiotlb_alloc_coherent: allocated memory is out of "
- "range for device");
+
+ /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+ unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+ return NULL;
}
*dma_handle = dev_addr;
return ret;