Commit cfd72a4c authored by Dave Airlie

Merge branch 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

drm-intel-next-2014-01-10:
- final bits for runtime D3 on Haswell from Paul (now enabled fully)
- parse the backlight modulation freq information in the VBT from Jani
  (but not yet used)
- more watermark improvements from Ville for ilk-ivb and bdw
- bugfixes for fastboot from Jesse
- watermark fix for i830M (but not yet everything)
- vlv vga hotplug w/a (Imre)
- piles of other small improvements, cleanups and fixes all over

Note that the pull request includes a backmerge of the last drm-fixes
pulled into Linus' tree - things were getting a bit too messy. So the
shortlog also contains a bunch of patches from Linus tree. Please yell if
you want me to frob it for you a bit.

* 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel: (609 commits)
  drm/i915/bdw: make sure south port interrupts are enabled properly v2
  drm/i915: Include more information in disabled hotplug interrupt warning
  drm/i915: Only complain about a rogue hotplug IRQ after disabling
  drm/i915: Only WARN about a stuck hotplug irq ONCE
  drm/i915: s/hotplugt_status_gen4/hotplug_status_g4x/
parents 9354eafd 0d9d349d
@@ -36,7 +36,6 @@
 #include "powernv.h"
 #include "pci.h"
 
-static char *hub_diag = NULL;
 static int ioda_eeh_nb_init = 0;
 
 static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
                 ioda_eeh_nb_init = 1;
         }
 
-        /* We needn't HUB diag-data on PHB3 */
-        if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
-                hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-                if (!hub_diag) {
-                        pr_err("%s: Out of memory !\n", __func__);
-                        return -ENOMEM;
-                }
-        }
-
 #ifdef CONFIG_DEBUG_FS
         if (phb->dbgfs) {
                 debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
 static void ioda_eeh_hub_diag(struct pci_controller *hose)
 {
         struct pnv_phb *phb = hose->private_data;
-        struct OpalIoP7IOCErrorData *data;
+        struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
         long rc;
 
-        data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
-        rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
+        rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
         if (rc != OPAL_SUCCESS) {
                 pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
                            __func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
         struct OpalIoPhbErrorCommon *common;
         long rc;
 
-        common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
-        rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
+        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+                                         PNV_PCI_DIAG_BUF_SIZE);
         if (rc != OPAL_SUCCESS) {
                 pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
                            __func__, hose->global_number, rc);
                 return;
         }
 
+        common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
         switch (common->ioType) {
         case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
                 ioda_eeh_p7ioc_phb_diag(hose, common);
......
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1;
 static u8 opal_lpc_inb(unsigned long port)
 {
         int64_t rc;
-        uint32_t data;
+        __be32 data;
 
         if (opal_lpc_chip_id < 0 || port > 0xffff)
                 return 0xff;
         rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
-        return rc ? 0xff : data;
+        return rc ? 0xff : be32_to_cpu(data);
 }
 
 static __le16 __opal_lpc_inw(unsigned long port)
 {
         int64_t rc;
-        uint32_t data;
+        __be32 data;
 
         if (opal_lpc_chip_id < 0 || port > 0xfffe)
                 return 0xffff;
         if (port & 1)
                 return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
         rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
-        return rc ? 0xffff : data;
+        return rc ? 0xffff : be32_to_cpu(data);
 }
 
 static u16 opal_lpc_inw(unsigned long port)
 {
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port)
 static __le32 __opal_lpc_inl(unsigned long port)
 {
         int64_t rc;
-        uint32_t data;
+        __be32 data;
 
         if (opal_lpc_chip_id < 0 || port > 0xfffc)
                 return 0xffffffff;
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port)
                        (__le32)opal_lpc_inb(port + 2) << 8 |
                                opal_lpc_inb(port + 3);
         rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
-        return rc ? 0xffffffff : data;
+        return rc ? 0xffffffff : be32_to_cpu(data);
 }
 
 static u32 opal_lpc_inl(unsigned long port)
......
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
 {
         struct opal_scom_map *m = map;
         int64_t rc;
+        __be64 v;
 
         reg = opal_scom_unmangle(reg);
-        rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+        rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+        *value = be64_to_cpu(v);
         return opal_xscom_err_xlate(rc);
 }
......
@@ -172,11 +172,13 @@ struct pnv_phb {
                 } ioda;
         };
 
-        /* PHB status structure */
+        /* PHB and hub status structure */
         union {
                 unsigned char                   blob[PNV_PCI_DIAG_BUF_SIZE];
                 struct OpalIoP7IOCPhbErrorData  p7ioc;
+                struct OpalIoP7IOCErrorData     hub_diag;
         } diag;
 
 };
 
 extern struct pci_ops pnv_pci_ops;
......
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m)
 {
         struct hvcall_ppp_data ppp_data;
         struct device_node *root;
-        const int *perf_level;
+        const __be32 *perf_level;
         int rc;
 
         rc = h_get_ppp(&ppp_data);
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m)
                 perf_level = of_get_property(root,
                                 "ibm,partition-performance-parameters-level",
                                              NULL);
-                if (perf_level && (*perf_level >= 1)) {
+                if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
                         seq_printf(m,
                             "physical_procs_allocated_to_virtualization=%d\n",
                                    ppp_data.phys_platform_procs);
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
         int partition_potential_processors;
         int partition_active_processors;
         struct device_node *rtas_node;
-        const int *lrdrp = NULL;
+        const __be32 *lrdrp = NULL;
 
         rtas_node = of_find_node_by_path("/rtas");
         if (rtas_node)
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
         if (lrdrp == NULL) {
                 partition_potential_processors = vdso_data->processorCount;
         } else {
-                partition_potential_processors = *(lrdrp + 4);
+                partition_potential_processors = be32_to_cpup(lrdrp + 4);
         }
         of_node_put(rtas_node);
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
         const char *model = "";
         const char *system_id = "";
         const char *tmp;
-        const unsigned int *lp_index_ptr;
+        const __be32 *lp_index_ptr;
         unsigned int lp_index = 0;
 
         seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
                 lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
                                         NULL);
                 if (lp_index_ptr)
-                        lp_index = *lp_index_ptr;
+                        lp_index = be32_to_cpup(lp_index_ptr);
                 of_node_put(rootdn);
         }
         seq_printf(m, "serial_number=%s\n", system_id);
......
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 {
         struct device_node *dn;
         struct pci_dn *pdn;
-        const u32 *req_msi;
+        const __be32 *p;
+        u32 req_msi;
 
         pdn = pci_get_pdn(pdev);
         if (!pdn)
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
         dn = pdn->node;
 
-        req_msi = of_get_property(dn, prop_name, NULL);
-        if (!req_msi) {
+        p = of_get_property(dn, prop_name, NULL);
+        if (!p) {
                 pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
                 return -ENOENT;
         }
 
-        if (*req_msi < nvec) {
+        req_msi = be32_to_cpup(p);
+        if (req_msi < nvec) {
                 pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
 
-                if (*req_msi == 0) /* Be paranoid */
+                if (req_msi == 0) /* Be paranoid */
                         return -ENOSPC;
 
-                return *req_msi;
+                return req_msi;
         }
 
         return 0;
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
 static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 {
         struct device_node *dn;
-        const u32 *p;
+        const __be32 *p;
 
         dn = of_node_get(pci_device_to_OF_node(dev));
         while (dn) {
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
                 if (p) {
                         pr_debug("rtas_msi: found prop on dn %s\n",
                                 dn->full_name);
-                        *total = *p;
+                        *total = be32_to_cpup(p);
                         return dn;
                 }
@@ -232,13 +234,13 @@ struct msi_counts {
 static void *count_non_bridge_devices(struct device_node *dn, void *data)
 {
         struct msi_counts *counts = data;
-        const u32 *p;
+        const __be32 *p;
         u32 class;
 
         pr_debug("rtas_msi: counting %s\n", dn->full_name);
 
         p = of_get_property(dn, "class-code", NULL);
-        class = p ? *p : 0;
+        class = p ? be32_to_cpup(p) : 0;
 
         if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
                 counts->num_devices++;
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
 static void *count_spare_msis(struct device_node *dn, void *data)
 {
         struct msi_counts *counts = data;
-        const u32 *p;
+        const __be32 *p;
         int req;
 
         if (dn == counts->requestor)
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
                 req = 0;
                 p = of_get_property(dn, "ibm,req#msi", NULL);
                 if (p)
-                        req = *p;
+                        req = be32_to_cpup(p);
 
                 p = of_get_property(dn, "ibm,req#msi-x", NULL);
                 if (p)
-                        req = max(req, (int)*p);
+                        req = max(req, (int)be32_to_cpup(p));
         }
 
         if (req < counts->quota)
......
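Context note on the pattern used throughout the pseries conversions above (this snippet is illustrative and not part of the commit; the property name and fallback value are just examples): device-tree property cells are stored big-endian, so a little-endian kernel must byte-swap them before use instead of dereferencing the raw pointer.

/* Minimal sketch of reading a big-endian device-tree cell safely. */
#include <linux/of.h>
#include <asm/byteorder.h>

static u32 example_read_req_msi(struct device_node *dn)
{
        const __be32 *p = of_get_property(dn, "ibm,req#msi", NULL);

        /* be32_to_cpup() is a no-op on big-endian hosts, a byte swap on LE */
        return p ? be32_to_cpup(p) : 0;
}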
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT];        /* assume this is in the first 4GB */
 static DEFINE_SPINLOCK(nvram_lock);
 
 struct err_log_info {
-        int error_type;
-        unsigned int seq_num;
+        __be32 error_type;
+        __be32 seq_num;
 };
 
 struct nvram_os_partition {
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 
 struct oops_log_info {
-        u16 version;
-        u16 report_length;
-        u64 timestamp;
+        __be16 version;
+        __be16 report_length;
+        __be64 timestamp;
 } __attribute__((packed));
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
                 length = part->size;
         }
 
-        info.error_type = err_type;
-        info.seq_num = error_log_cnt;
+        info.error_type = cpu_to_be32(err_type);
+        info.seq_num = cpu_to_be32(error_log_cnt);
 
         tmp_index = part->index;
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
         }
 
         if (part->os_partition) {
-                *error_log_cnt = info.seq_num;
-                *err_type = info.error_type;
+                *error_log_cnt = be32_to_cpu(info.seq_num);
+                *err_type = be32_to_cpu(info.error_type);
         }
 
         return 0;
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len)
                 pr_err("nvram: logging uncompressed oops/panic report\n");
                 return -1;
         }
-        oops_hdr->version = OOPS_HDR_VERSION;
-        oops_hdr->report_length = (u16) zipped_len;
-        oops_hdr->timestamp = get_seconds();
+        oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+        oops_hdr->report_length = cpu_to_be16(zipped_len);
+        oops_hdr->timestamp = cpu_to_be64(get_seconds());
         return 0;
 }
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type,
                         clobbering_unread_rtas_event())
                 return -1;
 
-        oops_hdr->version = OOPS_HDR_VERSION;
-        oops_hdr->report_length = (u16) size;
-        oops_hdr->timestamp = get_seconds();
+        oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+        oops_hdr->report_length = cpu_to_be16(size);
+        oops_hdr->timestamp = cpu_to_be64(get_seconds());
 
         if (compressed)
                 err_type = ERR_TYPE_KERNEL_PANIC_GZ;
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
                 size_t length, hdr_size;
 
                 oops_hdr = (struct oops_log_info *)buff;
-                if (oops_hdr->version < OOPS_HDR_VERSION) {
+                if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
                         /* Old format oops header had 2-byte record size */
                         hdr_size = sizeof(u16);
-                        length = oops_hdr->version;
+                        length = be16_to_cpu(oops_hdr->version);
                         time->tv_sec = 0;
                         time->tv_nsec = 0;
                 } else {
                         hdr_size = sizeof(*oops_hdr);
-                        length = oops_hdr->report_length;
-                        time->tv_sec = oops_hdr->timestamp;
+                        length = be16_to_cpu(oops_hdr->report_length);
+                        time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
                         time->tv_nsec = 0;
                 }
                 *buf = kmalloc(length, GFP_KERNEL);
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
                 kmsg_dump_get_buffer(dumper, false,
                                      oops_data, oops_data_sz, &text_len);
                 err_type = ERR_TYPE_KERNEL_PANIC;
-                oops_hdr->version = OOPS_HDR_VERSION;
-                oops_hdr->report_length = (u16) text_len;
-                oops_hdr->timestamp = get_seconds();
+                oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+                oops_hdr->report_length = cpu_to_be16(text_len);
+                oops_hdr->timestamp = cpu_to_be64(get_seconds());
         }
 
         (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
-                (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
+                (int) (sizeof(*oops_hdr) + text_len), err_type,
                 ++oops_count);
 
         spin_unlock_irqrestore(&lock, flags);
......
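Context note on the NVRAM header conversions above (illustrative only, not part of the commit; struct and function names here are made up): the on-NVRAM layout is fixed big-endian, so the header fields are declared __be16/__be64 and converted only at the boundary, keeping native-endian values everywhere else in the kernel.

/* Minimal sketch of the fill/read round trip for a big-endian header. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_oops_hdr {
        __be16 version;
        __be16 report_length;
        __be64 timestamp;
} __attribute__((packed));

static void example_fill_hdr(struct example_oops_hdr *hdr,
                             u16 version, u16 len, u64 now)
{
        hdr->version = cpu_to_be16(version);      /* stored big-endian */
        hdr->report_length = cpu_to_be16(len);
        hdr->timestamp = cpu_to_be64(now);
}

static u16 example_read_len(const struct example_oops_hdr *hdr)
{
        return be16_to_cpu(hdr->report_length);   /* native-endian for callers */
}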
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
         struct device_node *dn, *pdn;
         struct pci_bus *bus;
-        const uint32_t *pcie_link_speed_stats;
+        const __be32 *pcie_link_speed_stats;
 
         bus = bridge->bus;
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
                 return 0;
 
         for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
-                pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+                pcie_link_speed_stats = of_get_property(pdn,
                         "ibm,pcie-link-speed-stats", NULL);
                 if (pcie_link_speed_stats)
                         break;
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
                 return 0;
         }
 
-        switch (pcie_link_speed_stats[0]) {
+        switch (be32_to_cpup(pcie_link_speed_stats)) {
         case 0x01:
                 bus->max_bus_speed = PCIE_SPEED_2_5GT;
                 break;
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
                 break;
         }
 
-        switch (pcie_link_speed_stats[1]) {
+        switch (be32_to_cpup(pcie_link_speed_stats)) {
         case 0x01:
                 bus->cur_bus_speed = PCIE_SPEED_2_5GT;
                 break;
......
@@ -135,7 +135,6 @@ config S390
         select HAVE_SYSCALL_TRACEPOINTS
         select HAVE_UID16 if 32BIT
         select HAVE_VIRT_CPU_ACCOUNTING
-        select INIT_ALL_POSSIBLE
         select KTIME_SCALAR if 32BIT
         select MODULES_USE_ELF_RELA
         select OLD_SIGACTION
......
@@ -31,6 +31,7 @@ extern void smp_yield(void);
 extern void smp_stop_cpu(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
+extern void smp_fill_possible_mask(void);
 
 #else /* CONFIG_SMP */
@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_yield(void) { }
 static inline void smp_stop_cpu(void) { }
+static inline void smp_fill_possible_mask(void) { }
 
 #endif /* CONFIG_SMP */
......
@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p)
         setup_vmcoreinfo();
         setup_lowcore();
 
+        smp_fill_possible_mask();
         cpu_init();
         s390_init_cpu_topology();
......
@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
         return 0;
 }
 
-static int __init setup_possible_cpus(char *s)
-{
-        int max, cpu;
+static unsigned int setup_possible_cpus __initdata;
 
-        if (kstrtoint(s, 0, &max) < 0)
-                return 0;
-        init_cpu_possible(cpumask_of(0));
-        for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
-                set_cpu_possible(cpu, true);
+static int __init _setup_possible_cpus(char *s)
+{
+        get_option(&s, &setup_possible_cpus);
         return 0;
 }
-early_param("possible_cpus", setup_possible_cpus);
+early_param("possible_cpus", _setup_possible_cpus);
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -775,6 +771,17 @@ void __noreturn cpu_die(void)
 #endif /* CONFIG_HOTPLUG_CPU */
 
+void __init smp_fill_possible_mask(void)
+{
+        unsigned int possible, cpu;
+
+        possible = setup_possible_cpus;
+        if (!possible)
+                possible = MACHINE_IS_VM ? 64 : nr_cpu_ids;
+        for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
+                set_cpu_possible(cpu, true);
+}
+
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
         /* request the 0x1201 emergency signal external interrupt */
......
@@ -75,6 +75,7 @@ void zpci_event_availability(void *data)
                 if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED)
                         break;
                 zdev->state = ZPCI_FN_STATE_CONFIGURED;
+                zdev->fh = ccdf->fh;
                 ret = zpci_enable_device(zdev);
                 if (ret)
                         break;
@@ -101,6 +102,7 @@ void zpci_event_availability(void *data)
                 if (pdev)
                         pci_stop_and_remove_bus_device(pdev);
 
+                zdev->fh = ccdf->fh;
                 zpci_disable_device(zdev);
                 zdev->state = ZPCI_FN_STATE_STANDBY;
                 break;
......
@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_FLATMEM
+/* need in pfn_valid macro */
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+#endif
 
 #define DECLARE_EXPORT(name)            \
         extern void name(void);EXPORT_SYMBOL(name)
......
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
          checksum.o strlen.o div64.o div64-generic.o
 
 # Extracted from libgcc
-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
          ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
          udiv_qrnnd.o
......
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
 }
 
 #define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
         return pte_val(a) & _PAGE_VALID;
 }
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
          * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
          * and SUN4V pte layout, so this inline test is fine.
          */
-        if (likely(mm != &init_mm) && pte_accessible(orig))
+        if (likely(mm != &init_mm) && pte_accessible(mm, orig))
                 tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
......
@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
-#define __copy_to_user_inatomic ___copy_to_user
-#define __copy_from_user_inatomic ___copy_from_user
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
 
 struct pt_regs;
 extern unsigned long compute_effective_address(struct pt_regs *,
......
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)
                 return 1;
 
 #ifdef CONFIG_PCI
-        if (dev->bus == &pci_bus_type)
+        if (dev_is_pci(dev))
                 return pci64_dma_supported(to_pci_dev(dev), device_mask);
 #endif
......
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-#ifdef CONFIG_PCI
-        if (dev->bus == &pci_bus_type)
+        if (dev_is_pci(dev))
                 return 1;
-#endif
         return 0;
 }
 EXPORT_SYMBOL(dma_supported);
......
@@ -6,6 +6,7 @@
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
 #include <linux/ftrace.h>
+#include <linux/context_tracking.h>
 
 #include <asm/cacheflush.h>
 #include <asm/kdebug.h>
......