#include <asm/vio.h>
-static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
+ u64 buffer[PLPAR_HCALL_BUFSIZE];
+ size_t size = max < 8 ? max : 8;
int rc;
- rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+ rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
if (rc != H_SUCCESS) {
pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
return -EIO;
}
+ memcpy(data, buffer, size);
/* The hypervisor interface returns 64 bits */
- return 8;
+ return size;
}
/**
static struct hwrng pseries_rng = {
.name = KBUILD_MODNAME,
- .data_read = pseries_rng_data_read,
+ .read = pseries_rng_read,
};
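
For reference, the converted read hook returns at most one 64-bit sample per call, truncated to the caller's buffer. A minimal userspace sketch of that contract (the function name and sample value are illustrative, not part of the driver):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Mimics pseries_rng_read(): the hypervisor hands back one 64-bit
     * word, but the core may ask for fewer than 8 bytes, so only
     * min(max, 8) bytes are copied out. */
    static int read_sample(void *data, size_t max)
    {
            uint64_t sample = 0x0123456789abcdefULL; /* stand-in for H_RANDOM data */
            size_t size = max < 8 ? max : 8;

            memcpy(data, &sample, size);
            return (int)size; /* bytes produced, as the hwrng core expects */
    }

    int main(void)
    {
            unsigned char buf[8];

            printf("asked for 4, got %d bytes\n", read_sample(buf, 4));
            printf("asked for 16, got %d bytes\n", read_sample(buf, 16));
            return 0;
    }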
static int __init pseries_rng_probe(struct vio_dev *dev,
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
- int ret = 0;
+ int ret = -ENOMEM;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
- return -ENOMEM;
+ return ret;
}
- init_job_desc(desc, 0);
-
dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_in)) {
dev_err(jrdev, "unable to map key input memory\n");
- kfree(desc);
- return -ENOMEM;
+ goto out_free;
}
+
+ dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_out)) {
+ dev_err(jrdev, "unable to map key output memory\n");
+ goto out_unmap_in;
+ }
+
+ init_job_desc(desc, 0);
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_out)) {
- dev_err(jrdev, "unable to map key output memory\n");
- kfree(desc);
- return -ENOMEM;
- }
append_fifo_store(desc, dma_addr_out, split_key_len,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
DMA_FROM_DEVICE);
+out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
-
+out_free:
kfree(desc);
-
return ret;
}
EXPORT_SYMBOL(gen_split_key);
struct dentry *debugfs_dir;
struct list_head list;
struct module *owner;
- uint8_t accel_id;
- uint8_t numa_node;
struct adf_accel_pci accel_pci_dev;
+ uint8_t accel_id;
} __packed;
#endif
WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
ring = &bank->rings[i];
if (hw_data->tx_rings_mask & (1 << i)) {
- ring->inflights = kzalloc_node(sizeof(atomic_t),
- GFP_KERNEL,
- accel_dev->numa_node);
+ ring->inflights =
+ kzalloc_node(sizeof(atomic_t),
+ GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
if (!ring->inflights)
goto err;
} else {
int i, ret;
etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
- accel_dev->numa_node);
+ dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data)
return -ENOMEM;
num_banks = GET_MAX_BANKS(accel_dev);
size = num_banks * sizeof(struct adf_etr_bank_data);
- etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+ etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data->banks) {
ret = -ENOMEM;
goto err_bank;
if (unlikely(!n))
return -EINVAL;
- bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+ bufl = kmalloc_node(sz, GFP_ATOMIC,
+ dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!bufl))
return -ENOMEM;
goto err;
for_each_sg(assoc, sg, assoc_n, i) {
+ if (!sg->length)
+ continue;
bufl->bufers[bufs].addr = dma_map_single(dev,
sg_virt(sg),
sg->length,
struct qat_alg_buf *bufers;
buflout = kmalloc_node(sz, GFP_ATOMIC,
- inst->accel_dev->numa_node);
+ dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err;
bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
list_for_each(itr, adf_devmgr_get_head()) {
accel_dev = list_entry(itr, struct adf_accel_dev, list);
- if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+ if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
+ dev_to_node(&GET_DEV(accel_dev)) < 0)
+ && adf_dev_started(accel_dev))
break;
accel_dev = NULL;
}
if (!accel_dev) {
- pr_err("QAT: Could not find device on give node\n");
+ pr_err("QAT: Could not find device on node %d\n", node);
accel_dev = adf_devmgr_get_first();
}
if (!accel_dev || !adf_dev_started(accel_dev))
for (i = 0; i < num_inst; i++) {
inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
- accel_dev->numa_node);
+ dev_to_node(&GET_DEV(accel_dev)));
if (!inst)
goto err;
uint64_t reg_val;
admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
- accel_dev->numa_node);
+ dev_to_node(&GET_DEV(accel_dev)));
if (!admin)
return -ENOMEM;
admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
kfree(accel_dev);
}
-static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
-{
- unsigned int bus_per_cpu = 0;
- struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
-
- if (!c->phys_proc_id)
- return 0;
-
- bus_per_cpu = 256 / (c->phys_proc_id + 1);
-
- if (bus_per_cpu != 0)
- return pdev->bus->number / bus_per_cpu;
- return 0;
-}
-
static int qat_dev_start(struct adf_accel_dev *accel_dev)
{
int cpus = num_online_cpus();
void __iomem *pmisc_bar_addr = NULL;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- uint8_t node;
int ret;
switch (ent->device) {
return -ENODEV;
}
- node = adf_get_dev_node_id(pdev);
- accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /* If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow. */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
- accel_dev->numa_node = node;
INIT_LIST_HEAD(&accel_dev->crypto_list);
/* Add accel device to accel table.
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
- hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
uint32_t msix_num_entries = hw_data->num_banks + 1;
entries = kzalloc_node(msix_num_entries * sizeof(*entries),
- GFP_KERNEL, accel_dev->numa_node);
+ GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
if (!entries)
return -ENOMEM;
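
All of the QAT hunks above converge on the same idiom: stop caching a home-grown numa_node and derive the node from the struct device at each allocation. A condensed, hypothetical kernel-side helper showing the pattern (qat_alloc_for_dev is not a real driver function):

    #include <linux/device.h>
    #include <linux/slab.h>

    static void *qat_alloc_for_dev(struct device *dev, size_t size, gfp_t flags)
    {
            /* dev_to_node() yields NUMA_NO_NODE (-1) when the platform has
             * no affinity information; kzalloc_node() then falls back to
             * any node, which is also why the device lookup above accepts
             * node < 0. */
            return kzalloc_node(size, flags, dev_to_node(dev));
    }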
return i915_drm_freeze(drm_dev);
}
+static int i915_pm_freeze_late(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+
+ return intel_suspend_complete(dev_priv);
+}
+
static int i915_pm_thaw_early(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
.freeze = i915_pm_freeze,
+ .freeze_late = i915_pm_freeze_late,
.thaw_early = i915_pm_thaw_early,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ if (!USES_PPGTT(dev_priv->dev))
+ /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
+ * so RTL will always use the value corresponding to
+ * pat_sel = 000".
+ * So let's disable cache for GGTT to avoid screen corruptions.
+ * MOCS still can be used though.
+ * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
+ * before this patch, i.e. the same uncached + snooping access
+ * like on gen6/7 seems to be in effect.
+ * - So this just fixes blitter/render access. Again it looks
+ * like it's not just uncached access, but uncached + snooping.
+ * So we can still hold onto all our assumptions wrt cpu
+ * clflushing on LLC machines.
+ */
+ pat = GEN8_PPAT(0, GEN8_PPAT_UC);
+
/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
* write would work. */
I915_WRITE(GEN8_PRIVATE_PAT, pat);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
+ int min;
WARN_ON(panel->backlight.max == 0);
+ /*
+ * XXX: If the vbt value is 255, it makes min equal to max, which leads
+ * to problems. There are such machines out there. Either our
+ * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
+ * against this by letting the minimum be at most (arbitrarily chosen)
+ * 25% of the max.
+ */
+ min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
+ if (min != dev_priv->vbt.backlight.min_brightness) {
+ DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
+ dev_priv->vbt.backlight.min_brightness, min);
+ }
+
/* vbt value is a coefficient in range [0..255] */
- return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
- 0, panel->backlight.max);
+ return scale(min, 0, 255, 0, panel->backlight.max);
}
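
To see what the clamp buys, here is a standalone model of the linear mapping (this scale() is a simplified truncating re-implementation for illustration, not the i915 helper, and the PWM max is an arbitrary example):

    #include <stdio.h>
    #include <stdint.h>

    /* Rescale val from [smin, smax] to [tmin, tmax] (truncating). */
    static uint32_t scale(uint32_t val, uint32_t smin, uint32_t smax,
                          uint32_t tmin, uint32_t tmax)
    {
            uint64_t num = (uint64_t)(val - smin) * (tmax - tmin);

            return tmin + (uint32_t)(num / (smax - smin));
    }

    int main(void)
    {
            uint32_t max = 7812; /* example hardware PWM max */

            /* A bogus VBT value of 255 would pin the minimum to the maximum... */
            printf("vbt=255 -> min %u of %u\n", scale(255, 0, 255, 0, max), max);
            /* ...while clamping to 64 keeps a usable brightness range. */
            printf("vbt=64  -> min %u of %u\n", scale(64, 0, 255, 0, max), max);
            return 0;
    }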
static int bdw_setup_backlight(struct intel_connector *connector)
/* init the CE partitions. CE only used for gfx on CIK */
radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
- radeon_ring_write(ring, 0xc000);
- radeon_ring_write(ring, 0xc000);
+ radeon_ring_write(ring, 0x8000);
+ radeon_ring_write(ring, 0x8000);
/* setup clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
u32 num_heads = 0, lb_size;
int i;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
{
struct radeon_ib ib;
unsigned i;
+ unsigned index;
int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp = 0;
+ u64 gpu_addr;
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ index = R600_WB_DMA_RING_TEST_OFFSET;
+ else
+ index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+ gpu_addr = rdev->wb.gpu_addr + index;
tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
+ rdev->wb.wb[index/4] = cpu_to_le32(tmp);
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
}
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
+ tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
u32 num_heads = 0, lb_size;
int i;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled) {
{
struct radeon_ib ib;
unsigned i;
+ unsigned index;
int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp = 0;
+ u64 gpu_addr;
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ index = R600_WB_DMA_RING_TEST_OFFSET;
+ else
+ index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
+ gpu_addr = rdev->wb.gpu_addr + index;
+
+ tmp = 0xCAFEDEAD;
+ rdev->wb.wb[index/4] = cpu_to_le32(tmp);
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
}
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
ib.ptr[3] = 0xDEADBEEF;
ib.length_dw = 4;
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
+ tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
/* FIXME: implement full support */
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
u32 num_heads = 0, lb_size;
int i;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{}
};
MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
opal = of_find_node_by_path("/ibm,opal/sensors");
if (!opal) {
- dev_err(&pdev->dev, "Opal node 'sensors' not found\n");
+ dev_dbg(&pdev->dev, "Opal node 'sensors' not found\n");
return -ENODEV;
}
err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe);
if (err) {
- pr_err("Platfrom driver probe failed\n");
+ if (err != -ENODEV)
+ pr_err("Platform driver probe failed (%d)\n", err);
+
goto exit_device_del;
}
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+ unsigned long duty;
+ int ret;
- if (ctx->pwm_value)
- return pwm_enable(ctx->pwm);
- return 0;
+ if (ctx->pwm_value == 0)
+ return 0;
+
+ duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);
+ ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
+ if (ret)
+ return ret;
+ return pwm_enable(ctx->pwm);
}
#endif
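
The resume path now recomputes the duty cycle instead of merely re-enabling the PWM, since the PWM state may not survive suspend. A standalone check of the arithmetic (MAX_PWM is 255 in the driver; the period below is an arbitrary example):

    #include <stdio.h>

    #define MAX_PWM 255
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long period = 40000;  /* ns, example PWM period */
            unsigned long pwm_value = 128; /* restored fan speed, 0..MAX_PWM */

            /* Same formula as pwm_fan_resume(): map 0..MAX_PWM onto
             * 0..period-1, rounding up so a nonzero pwm_value can never
             * collapse to a zero duty cycle. */
            unsigned long duty = DIV_ROUND_UP(pwm_value * (period - 1), MAX_PWM);

            printf("duty = %lu ns of %lu ns\n", duty, period);
            return 0;
    }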
goto err_irq_charger;
}
- ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
+ ret = regmap_add_irq_chip(max77693->regmap_muic, max77693->irq,
IRQF_ONESHOT | IRQF_SHARED |
IRQF_TRIGGER_FALLING, 0,
&max77693_muic_irq_chip,
goto err_irq_muic;
}
+ /* Unmask interrupts from all blocks in interrupt source register */
+ ret = regmap_update_bits(max77693->regmap,
+ MAX77693_PMIC_REG_INTSRC_MASK,
+ SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL);
+ if (ret < 0) {
+ dev_err(max77693->dev,
+ "Could not unmask interrupts in INTSRC: %d\n",
+ ret);
+ goto err_intsrc;
+ }
+
pm_runtime_set_active(max77693->dev);
ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
err_mfd:
mfd_remove_devices(max77693->dev);
+err_intsrc:
regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
err_irq_muic:
regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
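
The INTSRC unmask above leans on regmap_update_bits() semantics: only bits inside the mask are touched, and the value is masked too, so passing ~SRC_IRQ_ALL as the value actually clears the four source-mask bits. A standalone model (SRC_IRQ_ALL is 0x0f per the defines later in this series):

    #include <stdio.h>
    #include <stdint.h>

    #define SRC_IRQ_ALL 0x0f /* charger | top | flash | muic */

    /* Models regmap_update_bits(): new = (old & ~mask) | (val & mask). */
    static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
    {
            return (old & ~mask) | (val & mask);
    }

    int main(void)
    {
            uint8_t intsrc_mask = 0xff; /* reset default: everything masked */

            /* (~SRC_IRQ_ALL & SRC_IRQ_ALL) == 0, so the four bits clear. */
            intsrc_mask = update_bits(intsrc_mask, SRC_IRQ_ALL,
                                      (uint8_t)~SRC_IRQ_ALL);
            printf("INTSRC_MASK = 0x%02x\n", intsrc_mask); /* 0xf0 */
            return 0;
    }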
mutex_unlock(&pcr->pcr_mutex);
}
+#ifdef CONFIG_PM
static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
if (pcr->ops->turn_off_led)
if (pcr->ops->force_power_down)
pcr->ops->force_power_down(pcr, pm_state);
}
+#endif
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
#define STMPE24XX_REG_CHIP_ID 0x80
#define STMPE24XX_REG_IEGPIOR_LSB 0x18
#define STMPE24XX_REG_ISGPIOR_MSB 0x19
-#define STMPE24XX_REG_GPMR_LSB 0xA5
+#define STMPE24XX_REG_GPMR_LSB 0xA4
#define STMPE24XX_REG_GPSR_LSB 0x85
#define STMPE24XX_REG_GPCR_LSB 0x88
#define STMPE24XX_REG_GPDR_LSB 0x8B
#define PWR_DEVSLP BIT(1)
#define PWR_DEVOFF BIT(0)
+/* Register bits for CFG_P1_TRANSITION (also for P2 and P3) */
+#define STARTON_SWBUG BIT(7) /* Start on watchdog */
+#define STARTON_VBUS BIT(5) /* Start on VBUS */
+#define STARTON_VBAT BIT(4) /* Start on battery insert */
+#define STARTON_RTC BIT(3) /* Start on RTC */
+#define STARTON_USB BIT(2) /* Start on USB host */
+#define STARTON_CHG BIT(1) /* Start on charger */
+#define STARTON_PWON BIT(0) /* Start on PWRON button */
+
#define SEQ_OFFSYNC (1 << 0)
#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36)
return 0;
}
+static int twl4030_starton_mask_and_set(u8 bitmask, u8 bitvalues)
+{
+ u8 regs[3] = { TWL4030_PM_MASTER_CFG_P1_TRANSITION,
+ TWL4030_PM_MASTER_CFG_P2_TRANSITION,
+ TWL4030_PM_MASTER_CFG_P3_TRANSITION, };
+ u8 val;
+ int i, err;
+
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+ if (err)
+ goto relock;
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+ TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+ if (err)
+ goto relock;
+
+ for (i = 0; i < sizeof(regs); i++) {
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER,
+ &val, regs[i]);
+ if (err)
+ break;
+ val = (~bitmask & val) | (bitmask & bitvalues);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+ val, regs[i]);
+ if (err)
+ break;
+ }
+
+ if (err)
+ pr_err("TWL4030 Register access failed: %i\n", err);
+
+relock:
+ return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+}
+
/*
* In master mode, start the power off sequence.
* After a successful execution, TWL shuts down the power to the SoC
{
int err;
+ /* Disable start on charger or VBUS as it can break poweroff */
+ err = twl4030_starton_mask_and_set(STARTON_VBUS | STARTON_CHG, 0);
+ if (err)
+ pr_err("TWL4030 Unable to configure start-up\n");
+
err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
TWL4030_PM_MASTER_P1_SW_EVENTS);
if (err)
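
The masked update inside twl4030_starton_mask_and_set() is the usual read-modify-write idiom; a standalone illustration using the poweroff call above (the register content is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t val = 0x3f;                    /* pretend CFG_Px_TRANSITION value */
            uint8_t bitmask = (1 << 5) | (1 << 1); /* STARTON_VBUS | STARTON_CHG */
            uint8_t bitvalues = 0;                 /* clear both start triggers */

            /* Bits selected by bitmask take bitvalues; all others survive. */
            val = (~bitmask & val) | (bitmask & bitvalues);
            printf("result: 0x%02x\n", val); /* 0x1d: bits 5 and 1 cleared */
            return 0;
    }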
version >> 8, version & 0xff,
vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
- ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
- ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
+ ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO,
+ vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0,
+ NULL);
if (ret != 0) {
dev_err(&interface->dev, "Failed to add mfd devices to core.");
goto error;
int measure_freq;
int ret;
+ if (!cpufreq_get_current_driver()) {
+ dev_dbg(&pdev->dev, "no cpufreq driver!");
+ return -EPROBE_DEFER;
+ }
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
return ret;
}
+ data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(data->thermal_clk)) {
+ ret = PTR_ERR(data->thermal_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get thermal clk: %d\n", ret);
+ cpufreq_cooling_unregister(data->cdev);
+ return ret;
+ }
+
+ /*
+ * The thermal sensor needs its clk on to return correct values.
+ * Normally we would enable the clk before taking a measurement and
+ * disable it afterwards, but when the alarm function is enabled the
+ * hardware measures the temperature periodically by itself, so the
+ * clk has to stay on permanently for the alarm function to work.
+ */
+ ret = clk_prepare_enable(data->thermal_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+ cpufreq_cooling_unregister(data->cdev);
+ return ret;
+ }
+
data->tz = thermal_zone_device_register("imx_thermal_zone",
IMX_TRIP_NUM,
BIT(IMX_TRIP_PASSIVE), data,
ret = PTR_ERR(data->tz);
dev_err(&pdev->dev,
"failed to register thermal zone device %d\n", ret);
+ clk_disable_unprepare(data->thermal_clk);
cpufreq_cooling_unregister(data->cdev);
return ret;
}
- data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(data->thermal_clk)) {
- dev_warn(&pdev->dev, "failed to get thermal clk!\n");
- } else {
- /*
- * Thermal sensor needs clk on to get correct value, normally
- * we should enable its clk before taking measurement and disable
- * clk after measurement is done, but if alarm function is enabled,
- * hardware will auto measure the temperature periodically, so we
- * need to keep the clk always on for alarm function.
- */
- ret = clk_prepare_enable(data->thermal_clk);
- if (ret)
- dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
- }
-
/* Enable measurements at ~ 10 Hz */
regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
if (ACPI_FAILURE(status))
return -EIO;
- *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET);
+ /*
+ * Thermal hysteresis represents a temperature difference.
+ * Kelvin and Celsius have the same degree size, so converting
+ * from tenths of a degree Kelvin to milli-Celsius here is just
+ * a multiplication by 100.
+ */
+ *temp = hyst * 100;
return 0;
}
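
A quick sanity check of the conversion, with an illustrative value: the firmware reports hysteresis in tenths of a degree Kelvin, and since a Kelvin step equals a Celsius step, a temperature difference converts with a bare multiply and no 273.15 offset:

    #include <stdio.h>

    int main(void)
    {
            unsigned long hyst = 20; /* example ACPI value: 2.0 K of hysteresis */

            /* 1 deci-kelvin difference == 100 milli-celsius difference */
            printf("%lu deci-kelvin -> %lu milli-celsius\n", hyst, hyst * 100);
            return 0;
    }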
static const struct exynos_tmu_registers exynos5260_tmu_registers = {
.triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
.tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL1,
.therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
.therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
.therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
#define EXYNOS_MAX_TRIGGER_PER_REG 4
/* Exynos5260 specific */
-#define EXYNOS_TMU_REG_CONTROL1 0x24
#define EXYNOS5260_TMU_REG_INTEN 0xC0
#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
MAX77693_IRQ_GROUP_NR,
};
+#define SRC_IRQ_CHARGER BIT(0)
+#define SRC_IRQ_TOP BIT(1)
+#define SRC_IRQ_FLASH BIT(2)
+#define SRC_IRQ_MUIC BIT(3)
+#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
+ | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
+
#define LED_IRQ_FLED2_OPEN BIT(0)
#define LED_IRQ_FLED2_SHORT BIT(1)
#define LED_IRQ_FLED1_OPEN BIT(2)
__ring_buffer_alloc((size), (flags), &__key); \
})
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
static_command_line, __start___param,
__stop___param - __start___param,
-1, -1, &unknown_bootoption);
- if (after_dashes)
+ if (!IS_ERR_OR_NULL(after_dashes))
parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
set_init_arg);
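
parse_args() can hand back a pointer into the command line, NULL, or an ERR_PTR()-encoded errno, which is why the bare NULL test was wrong. A compact userspace model of that convention (the kernel's IS_ERR_OR_NULL() is a macro over the same trick):

    #include <stdio.h>

    /* Userspace model of the kernel's ERR_PTR convention: the top 4095
     * values of the address space encode -errno. */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static int is_err_or_null(const void *ptr)
    {
            return !ptr || IS_ERR_VALUE((unsigned long)ptr);
    }

    int main(void)
    {
            static char args[] = "init=/bin/sh";

            printf("%d\n", is_err_or_null(NULL));         /* 1: nothing after "--" */
            printf("%d\n", is_err_or_null(ERR_PTR(-22))); /* 1: -EINVAL from parsing */
            printf("%d\n", is_err_or_null(args));         /* 0: real init argument */
            return 0;
    }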
* ring_buffer_wait - wait for input to the ring buffer
* @buffer: buffer to wait on
* @cpu: the cpu buffer to wait on
+ * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
*
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
* as data is added to any of the @buffer's cpu buffers. Otherwise
* it will wait for data to be added to a specific cpu buffer.
*/
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
{
- struct ring_buffer_per_cpu *cpu_buffer;
+ struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
DEFINE_WAIT(wait);
struct rb_irq_work *work;
+ int ret = 0;
/*
* Depending on what the caller is waiting for, either any
}
- prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+ while (true) {
+ prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
- /*
- * The events can happen in critical sections where
- * checking a work queue can cause deadlocks.
- * After adding a task to the queue, this flag is set
- * only to notify events to try to wake up the queue
- * using irq_work.
- *
- * We don't clear it even if the buffer is no longer
- * empty. The flag only causes the next event to run
- * irq_work to do the work queue wake up. The worse
- * that can happen if we race with !trace_empty() is that
- * an event will cause an irq_work to try to wake up
- * an empty queue.
- *
- * There's no reason to protect this flag either, as
- * the work queue and irq_work logic will do the necessary
- * synchronization for the wake ups. The only thing
- * that is necessary is that the wake up happens after
- * a task has been queued. It's OK for spurious wake ups.
- */
- work->waiters_pending = true;
+ /*
+ * The events can happen in critical sections where
+ * checking a work queue can cause deadlocks.
+ * After adding a task to the queue, this flag is set
+ * only to notify events to try to wake up the queue
+ * using irq_work.
+ *
+ * We don't clear it even if the buffer is no longer
+ * empty. The flag only causes the next event to run
+ * irq_work to do the work queue wake up. The worse
+ * that can happen if we race with !trace_empty() is that
+ * an event will cause an irq_work to try to wake up
+ * an empty queue.
+ *
+ * There's no reason to protect this flag either, as
+ * the work queue and irq_work logic will do the necessary
+ * synchronization for the wake ups. The only thing
+ * that is necessary is that the wake up happens after
+ * a task has been queued. It's OK for spurious wake ups.
+ */
+ work->waiters_pending = true;
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+ break;
+
+ if (cpu != RING_BUFFER_ALL_CPUS &&
+ !ring_buffer_empty_cpu(buffer, cpu)) {
+ unsigned long flags;
+ bool pagebusy;
+
+ if (!full)
+ break;
+
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ if (!pagebusy)
+ break;
+ }
- if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
- (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
schedule();
+ }
finish_wait(&work->waiters, &wait);
- return 0;
+
+ return ret;
}
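
The rework folds the old single-shot wait into a retry loop with three exits: a pending signal, any buffered data when @full is clear, or a completed reader page when @full is set. A tiny truth-table model of that wake-up decision (names are illustrative; the kernel wraps this in prepare_to_wait()/schedule()):

    #include <stdio.h>
    #include <stdbool.h>

    /* Models the break conditions of the reworked ring_buffer_wait(). */
    static bool should_wake(bool signalled, bool has_data, bool want_full,
                            bool page_complete)
    {
            if (signalled)
                    return true;  /* the real code returns -EINTR here */
            if (!has_data)
                    return false; /* nothing buffered: keep sleeping */
            return !want_full || page_complete;
    }

    int main(void)
    {
            printf("%d\n", should_wake(false, true,  false, false)); /* 1 */
            printf("%d\n", should_wake(false, true,  true,  false)); /* 0: wait for page */
            printf("%d\n", should_wake(false, true,  true,  true));  /* 1 */
            printf("%d\n", should_wake(true,  false, true,  false)); /* 1: signal */
            return 0;
    }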
/**
}
#endif /* CONFIG_TRACER_MAX_TRACE */
-static int wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
/* Iterators are static, they should be filled or empty */
if (trace_buffer_iter(iter, iter->cpu_file))
return 0;
- return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+ return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+ full);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
mutex_unlock(&iter->mutex);
- ret = wait_on_pipe(iter);
+ ret = wait_on_pipe(iter, false);
mutex_lock(&iter->mutex);
if (ret)
return ret;
-
- if (signal_pending(current))
- return -EINTR;
}
return 1;
goto out_unlock;
}
mutex_unlock(&trace_types_lock);
- ret = wait_on_pipe(iter);
+ ret = wait_on_pipe(iter, false);
mutex_lock(&trace_types_lock);
if (ret) {
size = ret;
goto out_unlock;
}
- if (signal_pending(current)) {
- size = -EINTR;
- goto out_unlock;
- }
goto again;
}
size = 0;
};
struct buffer_ref *ref;
int entries, size, i;
- ssize_t ret;
+ ssize_t ret = 0;
mutex_lock(&trace_types_lock);
int r;
ref = kzalloc(sizeof(*ref), GFP_KERNEL);
- if (!ref)
+ if (!ref) {
+ ret = -ENOMEM;
break;
+ }
ref->ref = 1;
ref->buffer = iter->trace_buffer->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
if (!ref->page) {
+ ret = -ENOMEM;
kfree(ref);
break;
}
/* did we read anything? */
if (!spd.nr_pages) {
+ if (ret)
+ goto out;
+
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
ret = -EAGAIN;
goto out;
}
mutex_unlock(&trace_types_lock);
- ret = wait_on_pipe(iter);
+ ret = wait_on_pipe(iter, true);
mutex_lock(&trace_types_lock);
if (ret)
goto out;
- if (signal_pending(current)) {
- ret = -EINTR;
- goto out;
- }
+
goto again;
}