Commit b8ce9fb8 authored by Tony Lindgren

Merge branch 'fixes-v3.0-rc3' into devel-fixes

parents c8e0bf95 e9e35c5a
@@ -14,7 +14,7 @@
 #define ASM_PPC_RIO_H
 
 extern void platform_rio_init(void);
-#ifdef CONFIG_RAPIDIO
+#ifdef CONFIG_FSL_RIO
 extern int fsl_rio_mcheck_exception(struct pt_regs *);
 #else
 static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; }
...
@@ -1979,7 +1979,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pvr_value		= 0x80240000,
 		.cpu_name		= "e5500",
 		.cpu_features		= CPU_FTRS_E5500,
-		.cpu_user_features	= COMMON_USER_BOOKE,
+		.cpu_user_features	= COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
 		.mmu_features		= MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
 			MMU_FTR_USE_TLBILX,
 		.icache_bsize		= 64,
...
@@ -82,11 +82,29 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
+/*
+ * overlaps_initrd - check for overlap with page aligned extension of
+ * initrd.
+ */
+static inline int overlaps_initrd(unsigned long start, unsigned long size)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (!initrd_start)
+		return 0;
+
+	return	(start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
+			start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
+#else
+	return 0;
+#endif
+}
+
 /**
  * move_device_tree - move tree to an unused area, if needed.
  *
  * The device tree may be allocated beyond our memory limit, or inside the
- * crash kernel region for kdump. If so, move it out of the way.
+ * crash kernel region for kdump, or within the page aligned range of initrd.
+ * If so, move it out of the way.
  */
 static void __init move_device_tree(void)
 {
@@ -99,7 +117,8 @@ static void __init move_device_tree(void)
 	size = be32_to_cpu(initial_boot_params->totalsize);
 
 	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
-			overlaps_crashkernel(start, size)) {
+			overlaps_crashkernel(start, size) ||
+			overlaps_initrd(start, size)) {
 		p = __va(memblock_alloc(size, PAGE_SIZE));
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = (struct boot_param_header *)p;
@@ -555,7 +574,9 @@ static void __init early_reserve_mem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* then reserve the initrd, if any */
 	if (initrd_start && (initrd_end > initrd_start))
-		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
+		memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
+			_ALIGN_UP(initrd_end, PAGE_SIZE) -
+			_ALIGN_DOWN(initrd_start, PAGE_SIZE));
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_PPC32
...
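The three hunks above agree on the rounding: early_reserve_mem() now reserves the initrd rounded out to whole pages, and overlaps_initrd() tests the device tree against that same page-rounded interval, so the tree is relocated whenever it would sit inside memory that later gets freed back. A minimal standalone sketch of that interval test (plain C; the macro definitions are spelled out here for illustration and are assumptions, not copied from the kernel headers):

	#define PAGE_SIZE		4096UL
	#define _ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
	#define _ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	/* Does [start, start + size) touch the page-rounded initrd range? */
	static int overlaps(unsigned long start, unsigned long size,
			    unsigned long rd_start, unsigned long rd_end)
	{
		return (start + size) > _ALIGN_DOWN(rd_start, PAGE_SIZE) &&
		       start <= _ALIGN_UP(rd_end, PAGE_SIZE);
	}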
@@ -223,21 +223,6 @@ void free_initmem(void)
 #undef FREESEC
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	if (start < end)
-		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
-}
-#endif
-
 #ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
...
@@ -99,20 +99,6 @@ void free_initmem(void)
 	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	if (start < end)
-		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
-}
-#endif
-
 static void pgd_ctor(void *addr)
 {
 	memset(addr, 0, PGD_TABLE_SIZE);
...
@@ -382,6 +382,25 @@ void __init mem_init(void)
 	mem_init_done = 1;
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+	if (start >= end)
+		return;
+
+	start = _ALIGN_DOWN(start, PAGE_SIZE);
+	end = _ALIGN_UP(end, PAGE_SIZE);
+	pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		init_page_count(virt_to_page(start));
+		free_page(start);
+		totalram_pages++;
+	}
+}
+#endif
+
 /*
  * This is called when a page has been modified by the kernel.
  * It just marks the page as not i-cache clean.  We do the i-cache
...
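The two identical free_initrd_mem() copies removed above are consolidated into this one version, which rounds the range out to whole pages first so it frees exactly what was reserved at boot. A worked example with made-up addresses, assuming 4K pages:

	/* start = 0x2000400  ->  _ALIGN_DOWN  ->  0x2000000
	 * end   = 0x2400400  ->  _ALIGN_UP    ->  0x2401000
	 * freed: 0x401000 bytes, i.e. "4100k freed" in the pr_info(),
	 * matching the page-rounded reservation made by early_reserve_mem().
	 */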
@@ -196,9 +196,6 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
 	out_be32(&lbc->lteccr, LTECCR_CLEAR);
 	out_be32(&lbc->ltedr, LTEDR_ENABLE);
 
-	/* Enable interrupts for any detected events */
-	out_be32(&lbc->lteir, LTEIR_ENABLE);
-
 	/* Set the monitor timeout value to the maximum for erratum A001 */
 	if (of_device_is_compatible(node, "fsl,elbc"))
 		clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
@@ -322,6 +319,9 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
 		goto err;
 	}
 
+	/* Enable interrupts for any detected events */
+	out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
+
 	return 0;
 
 err:
...
@@ -89,6 +89,7 @@ config S390
 	select HAVE_GET_USER_PAGES_FAST
 	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+	select HAVE_RCU_TABLE_FREE if SMP
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
...
@@ -17,15 +17,15 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 
-#define check_pgt_cache()	do {} while (0)
-
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
-void crst_table_free_rcu(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_rcu(struct mm_struct *, unsigned long *);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+void page_table_free_rcu(struct mmu_gather *, unsigned long *);
+void __tlb_remove_table(void *_table);
+#endif
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
...
@@ -293,19 +293,6 @@ extern unsigned long VMALLOC_START;
  * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
  */
 
-/* Page status table bits for virtualization */
-#define RCP_ACC_BITS	0xf000000000000000UL
-#define RCP_FP_BIT	0x0800000000000000UL
-#define RCP_PCL_BIT	0x0080000000000000UL
-#define RCP_HR_BIT	0x0040000000000000UL
-#define RCP_HC_BIT	0x0020000000000000UL
-#define RCP_GR_BIT	0x0004000000000000UL
-#define RCP_GC_BIT	0x0002000000000000UL
-
-/* User dirty / referenced bit for KVM's migration feature */
-#define KVM_UR_BIT	0x0000800000000000UL
-#define KVM_UC_BIT	0x0000400000000000UL
-
 #ifndef __s390x__
 
 /* Bits in the segment table address-space-control-element */
@@ -325,6 +312,19 @@ extern unsigned long VMALLOC_START;
 #define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
 
+/* Page status table bits for virtualization */
+#define RCP_ACC_BITS	0xf0000000UL
+#define RCP_FP_BIT	0x08000000UL
+#define RCP_PCL_BIT	0x00800000UL
+#define RCP_HR_BIT	0x00400000UL
+#define RCP_HC_BIT	0x00200000UL
+#define RCP_GR_BIT	0x00040000UL
+#define RCP_GC_BIT	0x00020000UL
+
+/* User dirty / referenced bit for KVM's migration feature */
+#define KVM_UR_BIT	0x00008000UL
+#define KVM_UC_BIT	0x00004000UL
+
 #else /* __s390x__ */
 
 /* Bits in the segment/region table address-space-control-element */
@@ -367,6 +367,19 @@ extern unsigned long VMALLOC_START;
 #define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
 #define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
 
+/* Page status table bits for virtualization */
+#define RCP_ACC_BITS	0xf000000000000000UL
+#define RCP_FP_BIT	0x0800000000000000UL
+#define RCP_PCL_BIT	0x0080000000000000UL
+#define RCP_HR_BIT	0x0040000000000000UL
+#define RCP_HC_BIT	0x0020000000000000UL
+#define RCP_GR_BIT	0x0004000000000000UL
+#define RCP_GC_BIT	0x0002000000000000UL
+
+/* User dirty / referenced bit for KVM's migration feature */
+#define KVM_UR_BIT	0x0000800000000000UL
+#define KVM_UC_BIT	0x0000400000000000UL
+
 #endif /* __s390x__ */
 
 /*
...
@@ -139,110 +139,47 @@ struct slib {
 	struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
 } __attribute__ ((packed, aligned(2048)));
 
-/**
- * struct sbal_flags - storage block address list flags
- * @last: last entry
- * @cont: contiguous storage
- * @frag: fragmentation
- */
-struct sbal_flags {
-	u8	: 1;
-	u8 last : 1;
-	u8 cont : 1;
-	u8	: 1;
-	u8 frag : 2;
-	u8	: 2;
-} __attribute__ ((packed));
-
-#define SBAL_FLAGS_FIRST_FRAG		0x04000000UL
-#define SBAL_FLAGS_MIDDLE_FRAG		0x08000000UL
-#define SBAL_FLAGS_LAST_FRAG		0x0c000000UL
-#define SBAL_FLAGS_LAST_ENTRY		0x40000000UL
-#define SBAL_FLAGS_CONTIGUOUS		0x20000000UL
+#define SBAL_EFLAGS_LAST_ENTRY		0x40
+#define SBAL_EFLAGS_CONTIGUOUS		0x20
+#define SBAL_EFLAGS_FIRST_FRAG		0x04
+#define SBAL_EFLAGS_MIDDLE_FRAG		0x08
+#define SBAL_EFLAGS_LAST_FRAG		0x0c
+#define SBAL_EFLAGS_MASK		0x6f
 
-#define SBAL_FLAGS0_DATA_CONTINUATION	0x20UL
+#define SBAL_SFLAGS0_PCI_REQ		0x40
+#define SBAL_SFLAGS0_DATA_CONTINUATION	0x20
 
 /* Awesome OpenFCP extensions */
-#define SBAL_FLAGS0_TYPE_STATUS		0x00UL
-#define SBAL_FLAGS0_TYPE_WRITE		0x08UL
-#define SBAL_FLAGS0_TYPE_READ		0x10UL
-#define SBAL_FLAGS0_TYPE_WRITE_READ	0x18UL
-#define SBAL_FLAGS0_MORE_SBALS		0x04UL
-#define SBAL_FLAGS0_COMMAND		0x02UL
-#define SBAL_FLAGS0_LAST_SBAL		0x00UL
-#define SBAL_FLAGS0_ONLY_SBAL		SBAL_FLAGS0_COMMAND
-#define SBAL_FLAGS0_MIDDLE_SBAL		SBAL_FLAGS0_MORE_SBALS
-#define SBAL_FLAGS0_FIRST_SBAL		SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND
-#define SBAL_FLAGS0_PCI		0x40
-
-/**
- * struct sbal_sbalf_0 - sbal flags for sbale 0
- * @pci: PCI indicator
- * @cont: data continuation
- * @sbtype: storage-block type (FCP)
- */
-struct sbal_sbalf_0 {
-	u8	  : 1;
-	u8 pci	  : 1;
-	u8 cont	  : 1;
-	u8 sbtype : 2;
-	u8	  : 3;
-} __attribute__ ((packed));
-
-/**
- * struct sbal_sbalf_1 - sbal flags for sbale 1
- * @key: storage key
- */
-struct sbal_sbalf_1 {
-	u8	: 4;
-	u8 key	: 4;
-} __attribute__ ((packed));
-
-/**
- * struct sbal_sbalf_14 - sbal flags for sbale 14
- * @erridx: error index
- */
-struct sbal_sbalf_14 {
-	u8	  : 4;
-	u8 erridx : 4;
-} __attribute__ ((packed));
-
-/**
- * struct sbal_sbalf_15 - sbal flags for sbale 15
- * @reason: reason for error state
- */
-struct sbal_sbalf_15 {
-	u8 reason;
-} __attribute__ ((packed));
-
-/**
- * union sbal_sbalf - storage block address list flags
- * @i0: sbalf0
- * @i1: sbalf1
- * @i14: sbalf14
- * @i15: sblaf15
- * @value: raw value
- */
-union sbal_sbalf {
-	struct sbal_sbalf_0	i0;
-	struct sbal_sbalf_1	i1;
-	struct sbal_sbalf_14	i14;
-	struct sbal_sbalf_15	i15;
-	u8			value;
-};
+#define SBAL_SFLAGS0_TYPE_STATUS	0x00
+#define SBAL_SFLAGS0_TYPE_WRITE		0x08
+#define SBAL_SFLAGS0_TYPE_READ		0x10
+#define SBAL_SFLAGS0_TYPE_WRITE_READ	0x18
+#define SBAL_SFLAGS0_MORE_SBALS		0x04
+#define SBAL_SFLAGS0_COMMAND		0x02
+#define SBAL_SFLAGS0_LAST_SBAL		0x00
+#define SBAL_SFLAGS0_ONLY_SBAL		SBAL_SFLAGS0_COMMAND
+#define SBAL_SFLAGS0_MIDDLE_SBAL	SBAL_SFLAGS0_MORE_SBALS
+#define SBAL_SFLAGS0_FIRST_SBAL		(SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)
 
 /**
  * struct qdio_buffer_element - SBAL entry
- * @flags: flags
+ * @eflags: SBAL entry flags
+ * @scount: SBAL count
+ * @sflags: whole SBAL flags
  * @length: length
  * @addr: address
  */
 struct qdio_buffer_element {
-	u32 flags;
+	u8 eflags;
+	/* private: */
+	u8 res1;
+	/* public: */
+	u8 scount;
+	u8 sflags;
 	u32 length;
 #ifdef CONFIG_32BIT
 	/* private: */
-	void *reserved;
+	void *res2;
 	/* public: */
 #endif
 	void *addr;
...
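This hunk splits the single u32 flags word per SBAL entry into four bytes. s390 is big-endian, so the old 32-bit constants map byte-wise onto the new 8-bit ones: the top byte becomes eflags (SBAL_FLAGS_LAST_ENTRY 0x40000000UL becomes SBAL_EFLAGS_LAST_ENTRY 0x40), while the old SBAL_FLAGS0_* values, which already fit in one byte, keep their values as SBAL_SFLAGS0_* in the sflags byte. A small illustrative sketch of the equivalence (assumed types and layout, not kernel code):

	union {
		u32 word;					/* old view */
		struct { u8 eflags, res1, scount, sflags; };	/* new view */
	} hdr;

	hdr.word = 0x40000000UL;	/* old SBAL_FLAGS_LAST_ENTRY */
	/* on big-endian s390: hdr.eflags == 0x40 == SBAL_EFLAGS_LAST_ENTRY */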
@@ -26,67 +26,60 @@
 #include <linux/swap.h>
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
-#include <asm/smp.h>
 #include <asm/tlbflush.h>
 
 struct mmu_gather {
 	struct mm_struct *mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch *batch;
+#endif
 	unsigned int fullmm;
-	unsigned int nr_ptes;
-	unsigned int nr_pxds;
-	unsigned int max;
-	void **array;
-	void *local[8];
+	unsigned int need_flush;
 };
 
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
 
-	if (addr) {
-		tlb->array = (void *) addr;
-		tlb->max = PAGE_SIZE / sizeof(void *);
-	}
-}
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+#endif
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 				  struct mm_struct *mm,
 				  unsigned int full_mm_flush)
 {
 	tlb->mm = mm;
-	tlb->max = ARRAY_SIZE(tlb->local);
-	tlb->array = tlb->local;
 	tlb->fullmm = full_mm_flush;
+	tlb->need_flush = 0;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
-	else
-		__tlb_alloc_page(tlb);
-	tlb->nr_ptes = 0;
-	tlb->nr_pxds = tlb->max;
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max))
-		__tlb_flush_mm(tlb->mm);
-	while (tlb->nr_ptes > 0)
-		page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
-	while (tlb->nr_pxds < tlb->max)
-		crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
+	if (!tlb->need_flush)
+		return;
+	tlb->need_flush = 0;
+	__tlb_flush_mm(tlb->mm);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
 	tlb_flush_mmu(tlb);
-	rcu_table_freelist_finish();
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	if (tlb->array != tlb->local)
-		free_pages((unsigned long) tlb->array, 0);
 }
 
 /*
@@ -112,12 +105,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-	if (!tlb->fullmm) {
-		tlb->array[tlb->nr_ptes++] = pte;
-		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb);
-	} else
-		page_table_free(tlb->mm, (unsigned long *) pte);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	if (!tlb->fullmm)
+		return page_table_free_rcu(tlb, (unsigned long *) pte);
+#endif
+	page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
 /*
@@ -133,12 +125,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
-	if (!tlb->fullmm) {
-		tlb->array[--tlb->nr_pxds] = pmd;
-		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb);
-	} else
-		crst_table_free(tlb->mm, (unsigned long *) pmd);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	if (!tlb->fullmm)
+		return tlb_remove_table(tlb, pmd);
+#endif
+	crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
 
@@ -155,12 +146,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
-	if (!tlb->fullmm) {
-		tlb->array[--tlb->nr_pxds] = pud;
-		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb);
-	} else
-		crst_table_free(tlb->mm, (unsigned long *) pud);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	if (!tlb->fullmm)
+		return tlb_remove_table(tlb, pud);
+#endif
+	crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }
...
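With need_flush and mmu_table_batch, this header now follows the generic HAVE_RCU_TABLE_FREE scheme: freed page tables are queued in a page-sized batch and released after an RCU grace period instead of through the old per-CPU freelists. MAX_TABLE_BATCH is simply how many pointers fit in the remainder of that page; a rough sanity check (assuming 4K pages, 64-bit pointers, and typical struct padding):

	/* sizeof(struct mmu_table_batch) ~= 16 (rcu_head) + 8 (nr + padding)
	 * MAX_TABLE_BATCH ~= (4096 - 24) / 8 = 509 table pointers per batch
	 * page (approximate; the exact figure depends on struct layout). */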
@@ -731,6 +731,7 @@ static int __init kvm_s390_init(void)
 	}
 	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
 	facilities[0] &= 0xff00fff3f47c0000ULL;
+	facilities[1] &= 0x201c000000000000ULL;
 	return 0;
 }
...
@@ -93,4 +93,6 @@ sie_err:
 
 	.section __ex_table,"a"
 	.quad	sie_inst,sie_err
+	.quad	sie_exit,sie_err
+	.quad	sie_reenter,sie_err
 	.previous
@@ -24,94 +24,12 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-struct rcu_table_freelist {
-	struct rcu_head rcu;
-	struct mm_struct *mm;
-	unsigned int pgt_index;
-	unsigned int crst_index;
-	unsigned long *table[0];
-};
-
-#define RCU_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
-	  / sizeof(unsigned long))
-
-static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
-
-static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-
-static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
-{
-	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
-	struct rcu_table_freelist *batch = *batchp;
-
-	if (batch)
-		return batch;
-	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
-	if (batch) {
-		batch->mm = mm;
-		batch->pgt_index = 0;
-		batch->crst_index = RCU_FREELIST_SIZE;
-		*batchp = batch;
-	}
-	return batch;
-}
-
-static void rcu_table_freelist_callback(struct rcu_head *head)
-{
-	struct rcu_table_freelist *batch =
-		container_of(head, struct rcu_table_freelist, rcu);
-
-	while (batch->pgt_index > 0)
-		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
-	while (batch->crst_index < RCU_FREELIST_SIZE)
-		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
-	free_page((unsigned long) batch);
-}
-
-void rcu_table_freelist_finish(void)
-{
-	struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
-	struct rcu_table_freelist *batch = *batchp;
-
-	if (!batch)
-		goto out;
-	call_rcu(&batch->rcu, rcu_table_freelist_callback);
-	*batchp = NULL;
-out:
-	put_cpu_var(rcu_table_freelist);
-}
-
-static void smp_sync(void *arg)
-{
-}
-
 #ifndef CONFIG_64BIT
 #define ALLOC_ORDER	1
-#define TABLES_PER_PAGE	4
-#define FRAG_MASK	15UL
-#define SECOND_HALVES	10UL
-
-void clear_table_pgstes(unsigned long *table)
-{
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
-	memset(table + 256, 0, PAGE_SIZE/4);
-	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
-	memset(table + 768, 0, PAGE_SIZE/4);
-}
-
+#define FRAG_MASK	0x0f
 #else
 #define ALLOC_ORDER	2
-#define TABLES_PER_PAGE	2
-#define FRAG_MASK	3UL
-#define SECOND_HALVES	2UL
-
-void clear_table_pgstes(unsigned long *table)
-{
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
-	memset(table + 256, 0, PAGE_SIZE/2);
-}
-
+#define FRAG_MASK	0x03
 #endif
 
 unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
@@ -140,29 +58,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
 	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
-{
-	struct rcu_table_freelist *batch;
-
-	preempt_disable();
-	if (atomic_read(&mm->mm_users) < 2 &&
-	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		crst_table_free(mm, table);
-		goto out;
-	}
-	batch = rcu_table_freelist_get(mm);
-	if (!batch) {
-		smp_call_function(smp_sync, NULL, 1);
-		crst_table_free(mm, table);
-		goto out;
-	}
-	batch->table[--batch->crst_index] = table;
-	if (batch->pgt_index >= batch->crst_index)
-		rcu_table_freelist_finish();
-out:
-	preempt_enable();
-}
-
 #ifdef CONFIG_64BIT
 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 {
@@ -238,124 +133,175 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 }
 #endif
 
+static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+{
+	unsigned int old, new;
+
+	do {
+		old = atomic_read(v);
+		new = old ^ bits;
+	} while (atomic_cmpxchg(v, old, new) != old);
+	return new;
+}
+
 /*
  * page table entry allocation/free routines.
  */
+#ifdef CONFIG_PGSTE
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
+{
+	struct page *page;
+	unsigned long *table;
+
+	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!page)
+		return NULL;
+	pgtable_page_ctor(page);
+	atomic_set(&page->_mapcount, 3);
+	table = (unsigned long *) page_to_phys(page);
+	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+	return table;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+	struct page *page;
+
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	pgtable_page_ctor(page);
+	atomic_set(&page->_mapcount, -1);
+	__free_page(page);
+}
+#endif
+
 unsigned long *page_table_alloc(struct mm_struct *mm)
 {
 	struct page *page;
 	unsigned long *table;
-	unsigned long bits;
+	unsigned int mask, bit;
 
-	bits = (mm->context.has_pgste) ? 3UL : 1UL;
+#ifdef CONFIG_PGSTE
+	if (mm_has_pgste(mm))
+		return page_table_alloc_pgste(mm);
+#endif
+	/* Allocate fragments of a 4K page as 1K/2K page table */
 	spin_lock_bh(&mm->context.list_lock);
-	page = NULL;
+	mask = FRAG_MASK;
 	if (!list_empty(&mm->context.pgtable_list)) {
 		page = list_first_entry(&mm->context.pgtable_list,
 					struct page, lru);
-		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
-			page = NULL;
+		table = (unsigned long *) page_to_phys(page);
+		mask = atomic_read(&page->_mapcount);
+		mask = mask | (mask >> 4);
 	}
-	if (!page) {
+	if ((mask & FRAG_MASK) == FRAG_MASK) {
 		spin_unlock_bh(&mm->context.list_lock);
 		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
 		if (!page)
 			return NULL;
 		pgtable_page_ctor(page);
-		page->flags &= ~FRAG_MASK;
+		atomic_set(&page->_mapcount, 1);
 		table = (unsigned long *) page_to_phys(page);
-		if (mm->context.has_pgste)
-			clear_table_pgstes(table);
-		else
-			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
 		spin_lock_bh(&mm->context.list_lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
+	} else {
+		for (bit = 1; mask & bit; bit <<= 1)
+			table += PTRS_PER_PTE;
+		mask = atomic_xor_bits(&page->_mapcount, bit);
+		if ((mask & FRAG_MASK) == FRAG_MASK)
+			list_del(&page->lru);
 	}
-	table = (unsigned long *) page_to_phys(page);
-	while (page->flags & bits) {
-		table += 256;
-		bits <<= 1;
-	}
-	page->flags |= bits;
-	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
-		list_move_tail(&page->lru, &mm->context.pgtable_list);
 	spin_unlock_bh(&mm->context.list_lock);
 	return table;
 }
 
-static void __page_table_free(struct mm_struct *mm, unsigned long *table)
+void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
 	struct page *page;
-	unsigned long bits;
+	unsigned int bit, mask;
 
-	bits = ((unsigned long) table) & 15;
-	table = (unsigned long *)(((unsigned long) table) ^ bits);
+#ifdef CONFIG_PGSTE
+	if (mm_has_pgste(mm))
+		return page_table_free_pgste(table);
+#endif
+	/* Free 1K/2K page table fragment of a 4K page */
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	page->flags ^= bits;
-	if (!(page->flags & FRAG_MASK)) {
+	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
+	spin_lock_bh(&mm->context.list_lock);
+	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
+		list_del(&page->lru);
+	mask = atomic_xor_bits(&page->_mapcount, bit);
+	if (mask & FRAG_MASK)
+		list_add(&page->lru, &mm->context.pgtable_list);
+	spin_unlock_bh(&mm->context.list_lock);
+	if (mask == 0) {
 		pgtable_page_dtor(page);
+		atomic_set(&page->_mapcount, -1);
 		__free_page(page);
 	}
 }
 
-void page_table_free(struct mm_struct *mm, unsigned long *table)
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+static void __page_table_free_rcu(void *table, unsigned bit)
 {
 	struct page *page;
-	unsigned long bits;
 
-	bits = (mm->context.has_pgste) ? 3UL : 1UL;
-	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+#ifdef CONFIG_PGSTE
+	if (bit == FRAG_MASK)
+		return page_table_free_pgste(table);
+#endif
+	/* Free 1K/2K page table fragment of a 4K page */
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	spin_lock_bh(&mm->context.list_lock);
-	page->flags ^= bits;
-	if (page->flags & FRAG_MASK) {
-		/* Page now has some free pgtable fragments. */
-		if (!list_empty(&page->lru))
-			list_move(&page->lru, &mm->context.pgtable_list);
-		page = NULL;
-	} else
-		/* All fragments of the 4K page have been freed. */
-		list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
-	if (page) {
+	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
 		pgtable_page_dtor(page);
+		atomic_set(&page->_mapcount, -1);
 		__free_page(page);
 	}
 }
 
-void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
+void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 {
-	struct rcu_table_freelist *batch;
+	struct mm_struct *mm;
 	struct page *page;
-	unsigned long bits;
+	unsigned int bit, mask;
 
-	preempt_disable();
-	if (atomic_read(&mm->mm_users) < 2 &&
-	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		page_table_free(mm, table);
-		goto out;
-	}
-	batch = rcu_table_freelist_get(mm);
-	if (!batch) {
-		smp_call_function(smp_sync, NULL, 1);
-		page_table_free(mm, table);
-		goto out;
+	mm = tlb->mm;
+#ifdef CONFIG_PGSTE
+	if (mm_has_pgste(mm)) {
+		table = (unsigned long *) (__pa(table) | FRAG_MASK);
+		tlb_remove_table(tlb, table);
+		return;
 	}
-	bits = (mm->context.has_pgste) ? 3UL : 1UL;
-	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+#endif
+	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock_bh(&mm->context.list_lock);
-	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
-	list_del_init(&page->lru);
+	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
+		list_del(&page->lru);
+	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
+	if (mask & FRAG_MASK)
+		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	spin_unlock_bh(&mm->context.list_lock);
-	table = (unsigned long *)(((unsigned long) table) | bits);
-	batch->table[batch->pgt_index++] = table;
-	if (batch->pgt_index >= batch->crst_index)
-		rcu_table_freelist_finish();
-out:
-	preempt_enable();
+	table = (unsigned long *) (__pa(table) | (bit << 4));
+	tlb_remove_table(tlb, table);
 }
 
+void __tlb_remove_table(void *_table)
+{
+	void *table = (void *)((unsigned long) _table & PAGE_MASK);
+	unsigned type = (unsigned long) _table & ~PAGE_MASK;
+
+	if (type)
+		__page_table_free_rcu(table, type);
+	else
+		free_pages((unsigned long) table, ALLOC_ORDER);
+}
+#endif
+
 /*
  * switch on pgstes for its userspace process (for kvm)
  */
@@ -369,7 +315,7 @@ int s390_enable_sie(void)
 		return -EINVAL;
 
 	/* Do we have pgstes? if yes, we are done */
-	if (tsk->mm->context.has_pgste)
+	if (mm_has_pgste(tsk->mm))
 		return 0;
 
 	/* lets check if we are allowed to replace the mm */
...
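tlb_remove_table() takes a single pointer, so page_table_free_rcu() smuggles the fragment information into the low bits of the page-aligned table address, and __tlb_remove_table() above splits it apart again: a zero tag means a full crst table, a non-zero tag a 1K/2K fragment (or, in the pgste case, FRAG_MASK). A minimal sketch of this tag-in-low-bits pattern (illustration only, not the kernel's exact types):

	/* Pack: low bits of a page-aligned address are known-zero,
	 * so they can carry a small type tag (type < PAGE_SIZE). */
	unsigned long packed = (unsigned long) table | type;

	/* Unpack, as __tlb_remove_table() does: */
	void *addr = (void *)(packed & PAGE_MASK);	/* recover the address */
	unsigned tag = packed & ~PAGE_MASK;		/* recover the tag */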
@@ -26,7 +26,6 @@ config SPARC
 	select HAVE_DMA_API_DEBUG
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_GENERIC_HARDIRQS
-	select GENERIC_HARDIRQS_NO_DEPRECATED
 	select GENERIC_IRQ_SHOW
 	select USE_GENERIC_SMP_HELPERS if SMP
@@ -528,6 +527,23 @@ config PCI_DOMAINS
 config PCI_SYSCALL
 	def_bool PCI
 
+config PCIC_PCI
+	bool
+	depends on PCI && SPARC32 && !SPARC_LEON
+	default y
+
+config LEON_PCI
+	bool
+	depends on PCI && SPARC_LEON
+	default y
+
+config GRPCI2
+	bool "GRPCI2 Host Bridge Support"
+	depends on LEON_PCI
+	default y
+	help
+	  Say Y here to include the GRPCI2 Host Bridge Driver.
+
 source "drivers/pci/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
...
@@ -138,7 +138,7 @@ static unsigned char sun_82072_fd_inb(int port)
 		return sun_fdc->data_82072;
 	case 7: /* FD_DIR */
 		return sun_read_dir();
-	};
+	}
 	panic("sun_82072_fd_inb: How did I get here?");
 }
@@ -161,7 +161,7 @@ static void sun_82072_fd_outb(unsigned char value, int port)
 	case 4: /* FD_STATUS */
 		sun_fdc->status_82072 = value;
 		break;
-	};
+	}
 	return;
 }
@@ -186,7 +186,7 @@ static unsigned char sun_82077_fd_inb(int port)
 		return sun_fdc->data_82077;
 	case 7: /* FD_DIR */
 		return sun_read_dir();
-	};
+	}
 	panic("sun_82077_fd_inb: How did I get here?");
 }
@@ -212,7 +212,7 @@ static void sun_82077_fd_outb(unsigned char value, int port)
 	case 3: /* FD_TDR */
 		sun_fdc->tapectl_82077 = value;
 		break;
-	};
+	}
 	return;
 }
...
@@ -111,7 +111,7 @@ static unsigned char sun_82077_fd_inb(unsigned long port)
 	case 7: /* FD_DIR */
 		/* XXX: Is DCL on 0x80 in sun4m? */
 		return sbus_readb(&sun_fdc->dir_82077);
-	};
+	}
 	panic("sun_82072_fd_inb: How did I get here?");
 }
@@ -135,7 +135,7 @@ static void sun_82077_fd_outb(unsigned char value, unsigned long port)
 	case 4: /* FD_STATUS */
 		sbus_writeb(value, &sun_fdc->status_82077);
 		break;
-	};
+	}
 	return;
 }
...
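All of these floppy hunks fix the same nit: a semicolon after a switch's closing brace is a stray empty statement, the kind of thing static checkers such as sparse warn about. In other words,

	switch (port) {
	case 7:	/* FD_DIR */
		return sun_read_dir();
	};	/* <- the ';' here is a redundant null statement */

simply becomes "}".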
@@ -318,6 +318,9 @@ struct device_node;
 extern unsigned int leon_build_device_irq(unsigned int real_irq,
					   irq_flow_handler_t flow_handler,
					   const char *name, int do_ack);
+extern void leon_update_virq_handling(unsigned int virq,
+				      irq_flow_handler_t flow_handler,
+				      const char *name, int do_ack);
 extern void leon_clear_clock_irq(void);
 extern void leon_load_profile_irq(int cpu, unsigned int limit);
 extern void leon_init_timers(irq_handler_t counter_fn);
...
/*
 * asm/leon_pci.h
 *
 * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
 */
#ifndef _ASM_LEON_PCI_H_
#define _ASM_LEON_PCI_H_

/* PCI related definitions */
struct leon_pci_info {
	struct pci_ops *ops;
	struct resource io_space;
	struct resource mem_space;
	int (*map_irq)(struct pci_dev *dev, u8 slot, u8 pin);
};

extern void leon_pci_init(struct platform_device *ofdev,
			  struct leon_pci_info *info);

#endif /* _ASM_LEON_PCI_H_ */