/**
* sr_classp5_disable() - disable SR for a voltage domain
* @sr: SmartReflex module which needs to be disabled
* @is_volt_reset: reset the voltage?
*
* Depending on @is_volt_reset, this function either disables SR alone or
* disables SR and resets the voltage to an appropriate level.
*
* NOTE: Appropriate locks must be held by the calling path to ensure
* mutual exclusion.
*/
static int sr_classp5_disable(struct omap_sr *sr, int is_volt_reset)
{
struct voltagedomain *voltdm = NULL;
struct omap_volt_data *volt_data = NULL;
struct sr_classp5_calib_data *work_data = NULL;
if (IS_ERR_OR_NULL(sr) || IS_ERR_OR_NULL(sr->voltdm)) {
pr_err("%s: bad parameters!\n", __func__);
return -EINVAL;
}
work_data = (struct sr_classp5_calib_data *)sr->voltdm_cdata;
if (IS_ERR_OR_NULL(work_data)) {
pr_err("%s: bad work data %s\n", __func__, sr->name);
return -EINVAL;
}
if (is_idle_task(current)) {
/*
* We should not reach this path from the idle task unless calibration is
* complete; the pm_qos constraint has already been released once the
* voltage calibration work finished.
*/
WARN_ON(work_data->work_active);
return 0;
}
/* Rest is regular DVFS path */
voltdm = sr->voltdm;
volt_data = omap_voltage_get_curr_vdata(voltdm);
if (IS_ERR_OR_NULL(volt_data)) {
pr_warn("%s: Voltage data is NULL. Cannot disable %s\n",
__func__, sr->name);
return -ENODATA;
}
/* Nothing more to do if the voltage is already calibrated and no calibration work is pending */
if (volt_data->volt_calibrated && !work_data->work_active) {
/*
* We are going OFF - disable clocks manually to allow OFF-mode.
*/
if (sr->suspended)
sr->ops->put(sr);
return 0;
}
if (work_data->work_active) {
/* mark the work as inactive and cancel any pending calibration work */
work_data->work_active = false;
cancel_delayed_work_sync(&work_data->work);
sr_notifier_control(sr, false);
}
sr_classp5_stop_hw_loop(sr);
if (is_volt_reset)
voltdm_reset(sr->voltdm);
/* Calibration is canceled, so the pm_qos request is no longer needed */
pm_qos_update_request(&work_data->qos, PM_QOS_DEFAULT_VALUE);
/*
* We are going OFF - disable clocks manually to allow OFF-mode.
*/
if (sr->suspended) {
/* !!! Should never ever be here - no guarantee to recover !!!*/
WARN(true, "Trying to go OFF with invalid AVS state\n");
sr->ops->put(sr);
}
return 0;
}
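/*
 * Illustrative sketch, not part of the original source: one way the DVFS
 * path might invoke sr_classp5_disable() before a voltage scale. The wrapper
 * name and the "reset voltage only on a failed scale" policy are assumptions
 * for illustration; the real class driver wires this up through its own
 * class-data callbacks.
 */
static int sr_classp5_disable_for_scale(struct omap_sr *sr, bool scale_failed)
{
	/* Reset the voltage only if the previous scale attempt failed. */
	return sr_classp5_disable(sr, scale_failed ? 1 : 0);
}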
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
struct sg_table *sgt;
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
/* is this one of our own objects? */
if (dma_buf->ops == &exynos_dmabuf_ops) {
struct drm_gem_object *obj;
exynos_gem_obj = dma_buf->priv;
obj = &exynos_gem_obj->base;
/* is it from our device? */
if (obj->dev == drm_dev) {
/*
* Importing a dmabuf exported from our own GEM object increases the
* refcount on the GEM object itself instead of the dmabuf's f_count.
*/
drm_gem_object_reference(obj);
return obj;
}
}
attach = dma_buf_attach(dma_buf, drm_dev->dev);
if (IS_ERR(attach))
return ERR_PTR(-EINVAL);
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(sgt)) {
/* PTR_ERR(NULL) would be 0, so map a NULL table to a real error */
ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
goto err_buf_detach;
}
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
ret = -ENOMEM;
goto err_unmap_attach;
}
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
goto err_free_buffer;
}
sgl = sgt->sgl;
buffer->size = dma_buf->size;
buffer->dma_addr = sg_dma_address(sgl);
if (sgt->nents == 1) {
/* memory is always physically contiguous if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
/*
* This case could be either CONTIG or NONCONTIG; for now assume
* NONCONTIG.
* TODO: find a way for the exporter to notify the importer of the
* type of its own buffer.
*/
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
exynos_gem_obj->buffer = buffer;
buffer->sgt = sgt;
exynos_gem_obj->base.import_attach = attach;
DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
buffer->size);
return &exynos_gem_obj->base;
err_free_buffer:
kfree(buffer);
buffer = NULL;
err_unmap_attach:
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
return ERR_PTR(ret);
}
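/*
 * Illustrative sketch, not part of the original source: the import helper
 * above is normally wired into the DRM driver's PRIME hooks roughly as shown
 * below. Only the PRIME-related fields are spelled out; the matching
 * exporter hook and the remaining driver fields are assumptions for this
 * kernel era and are trimmed here.
 */
static struct drm_driver exynos_drm_driver_sketch = {
	.driver_features	= DRIVER_GEM | DRIVER_PRIME,
	/* exynos_dmabuf_prime_export assumed to be the exporter in this file */
	.gem_prime_export	= exynos_dmabuf_prime_export,
	.gem_prime_import	= exynos_dmabuf_prime_import,
	/* .fops, .ioctls, .name, etc. omitted */
};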
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
const struct mmu_notifier_range *range)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it;
struct mutex *unlock = NULL;
unsigned long end;
int ret = 0;
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
return 0;
/* interval ranges are inclusive, but invalidate range is exclusive */
end = range->end - 1;
spin_lock(&mn->lock);
it = interval_tree_iter_first(&mn->objects, range->start, end);
while (it) {
struct drm_i915_gem_object *obj;
if (!mmu_notifier_range_blockable(range)) {
ret = -EAGAIN;
break;
}
/*
* The mmu_object is released late when destroying the GEM object, so it
* is entirely possible to gain a reference on an object in the process
* of being freed, use it after it has been freed, and then free it a
* second time, since our serialisation is via the spinlock and not the
* struct_mutex. To prevent that use-after-free we only acquire a
* reference on the object if it is not in the process of being
* destroyed.
*/
obj = container_of(it, struct i915_mmu_object, it)->obj;
if (!kref_get_unless_zero(&obj->base.refcount)) {
it = interval_tree_iter_next(it, range->start, end);
continue;
}
spin_unlock(&mn->lock);
if (!unlock) {
unlock = &mn->mm->i915->drm.struct_mutex;
switch (mutex_trylock_recursive(unlock)) {
default:
case MUTEX_TRYLOCK_FAILED:
if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
i915_gem_object_put(obj);
return -EINTR;
}
/* fall through */
case MUTEX_TRYLOCK_SUCCESS:
break;
case MUTEX_TRYLOCK_RECURSIVE:
unlock = ERR_PTR(-EEXIST);
break;
}
}
ret = i915_gem_object_unbind(obj);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
if (ret)
goto unlock;
spin_lock(&mn->lock);
/*
* As we do not (yet) protect the mmu from concurrent insertion
* over this range, there is no guarantee that this search will
* terminate given a pathologic workload.
*/
it = interval_tree_iter_first(&mn->objects, range->start, end);
}
spin_unlock(&mn->lock);
unlock:
if (!IS_ERR_OR_NULL(unlock))
mutex_unlock(unlock);
return ret;
}
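/*
 * Illustrative sketch, not part of the original source: the callback above
 * is registered through an mmu_notifier_ops table attached to the process
 * mm (see <linux/mmu_notifier.h>), roughly as below. The ops table and
 * helper names mirror what i915's userptr code does, but treat the exact
 * surrounding structure as an assumption.
 */
static const struct mmu_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate_range_start = userptr_mn_invalidate_range_start,
};

static int i915_mmu_notifier_attach_sketch(struct i915_mmu_notifier *mn,
					   struct mm_struct *mm)
{
	mn->mn.ops = &i915_gem_userptr_notifier_ops;
	/* Register the notifier against the given mm. */
	return mmu_notifier_register(&mn->mn, mm);
}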
PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData)
{
IMG_INT32 opp_count;
IMG_UINT32 i, *freq_list;
struct opp *opp;
unsigned long freq;
/**
* We query and store the list of SGX frequencies just this once under the
* assumption that they are unchanging, e.g. no disabling of high frequency
* option for thermal management. This is currently valid for 4430 and 4460.
*/
rcu_read_lock();
opp_count = opp_get_opp_count(&gpsPVRLDMDev->dev);
if (opp_count < 1)
{
rcu_read_unlock();
PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp count"));
return PVRSRV_ERROR_NOT_SUPPORTED;
}
/**
* Allocate the frequency list with a slot for each available frequency plus
* one additional slot to hold a designated frequency value to assume when in
* an unknown frequency state.
*/
freq_list = kmalloc((opp_count + 1) * sizeof(IMG_UINT32), GFP_ATOMIC);
if (!freq_list)
{
rcu_read_unlock();
PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not allocate frequency list"));
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
/**
* Fill in frequency list from lowest to highest then finally the "unknown"
* frequency value. We use the highest available frequency as our assumed value
* when in an unknown state, because it is safer for APM and hardware recovery
* timers to be longer than intended rather than shorter.
*/
freq = 0;
for (i = 0; i < opp_count; i++)
{
opp = opp_find_freq_ceil(&gpsPVRLDMDev->dev, &freq);
if (IS_ERR_OR_NULL(opp))
{
rcu_read_unlock();
PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp level %d", i));
kfree(freq_list);
return PVRSRV_ERROR_NOT_SUPPORTED;
}
freq_list[i] = (IMG_UINT32)freq;
freq++;
}
rcu_read_unlock();
freq_list[opp_count] = freq_list[opp_count - 1];
psSysSpecificData->ui32SGXFreqListSize = opp_count + 1;
psSysSpecificData->pui32SGXFreqList = freq_list;
/* Start in unknown state - no frequency request to DVFS yet made */
psSysSpecificData->ui32SGXFreqListIndex = opp_count;
return PVRSRV_OK;
}
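/**
 * Illustrative sketch, not part of the original source: a matching cleanup
 * helper that releases the frequency list built above. The helper name and
 * the exact fields reset here are assumptions; the real DDK has its own
 * deinitialisation path.
 */
static PVRSRV_ERROR SysDvfsCleanupSketch(SYS_SPECIFIC_DATA *psSysSpecificData)
{
	if (psSysSpecificData->pui32SGXFreqList)
	{
		kfree(psSysSpecificData->pui32SGXFreqList);
		psSysSpecificData->pui32SGXFreqList = NULL;
	}
	psSysSpecificData->ui32SGXFreqListSize = 0;
	psSysSpecificData->ui32SGXFreqListIndex = 0;

	return PVRSRV_OK;
}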
static int davinci_musb_init(struct musb *musb)
{
void __iomem *tibase = musb->ctrl_base;
u32 revision;
int ret = -ENODEV;
usb_nop_xceiv_register();
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(musb->xceiv)) {
ret = -EPROBE_DEFER;
goto unregister;
}
musb->mregs += DAVINCI_BASE_OFFSET;
/* returns zero if e.g. not clocked */
revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
if (revision == 0)
goto fail;
setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
davinci_musb_source_power(musb, 0, 1);
/* dm355 EVM swaps D+/D- for signal integrity, and
* is clocked from the main 24 MHz crystal.
*/
if (machine_is_davinci_dm355_evm()) {
u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
phy_ctrl &= ~(3 << 9);
phy_ctrl |= USBPHY_DATAPOL;
__raw_writel(phy_ctrl, USB_PHY_CTRL);
}
/* On dm355, the default-A state machine needs DRVVBUS control.
* If we won't be a host, there's no need to turn it on.
*/
if (cpu_is_davinci_dm355()) {
u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);
deepsleep &= ~DRVVBUS_FORCE;
__raw_writel(deepsleep, DM355_DEEPSLEEP);
}
/* reset the controller */
musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
/* start the on-chip PHY and its PLL */
phy_on();
msleep(5);
/* NOTE: irqs are in mixed mode, not bypass to pure-musb */
pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
revision, __raw_readl(USB_PHY_CTRL),
musb_readb(tibase, DAVINCI_USB_CTRL_REG));
musb->isr = davinci_musb_interrupt;
return 0;
fail:
usb_put_phy(musb->xceiv);
unregister:
usb_nop_xceiv_unregister();
return ret;
}
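/*
 * Illustrative sketch, not part of the original source: davinci_musb_init()
 * is plugged into the glue layer's musb_platform_ops roughly as shown,
 * assuming the usual davinci_musb_exit() counterpart exists elsewhere in
 * this file. The other callbacks and their exact names for this kernel era
 * are omitted.
 */
static const struct musb_platform_ops davinci_ops_sketch = {
	.init	= davinci_musb_init,
	.exit	= davinci_musb_exit,
	/* .enable, .disable, .set_mode, etc. omitted */
};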
/**
* sr_classp5_suspend_noirq() - class suspend_noirq handler
* @sr: SmartReflex module that is being suspended
*
* The suspend_noirq handler makes sure that the calibration work is
* canceled before the system moves to OFF mode. Otherwise the work could
* run at any moment, trigger SmartReflex and race with the CPU idle
* notifiers; as a result the system would crash.
*/
static int sr_classp5_suspend_noirq(struct omap_sr *sr)
{
struct sr_classp5_calib_data *work_data;
struct omap_volt_data *volt_data;
struct voltagedomain *voltdm;
int ret = 0;
if (IS_ERR_OR_NULL(sr)) {
pr_err("%s: bad parameters!\n", __func__);
return -EINVAL;
}
work_data = (struct sr_classp5_calib_data *)sr->voltdm_cdata;
if (IS_ERR_OR_NULL(work_data)) {
pr_err("%s: bad work data %s\n", __func__, sr->name);
return -EINVAL;
}
/*
* At suspend_noirq time this code does not strictly need to be protected
* by omap_dvfs_lock, but be paranoid - something may still be running on
* another CPU.
*/
mutex_lock(&omap_dvfs_lock);
voltdm = sr->voltdm;
volt_data = omap_voltage_get_curr_vdata(voltdm);
if (IS_ERR_OR_NULL(volt_data)) {
pr_warn("%s: Voltage data is NULL. Cannot disable %s\n",
__func__, sr->name);
ret = -ENODATA;
goto finish_suspend;
}
/*
* If calibration is currently active, abort the suspend.
*/
if (work_data->work_active) {
pr_warn("%s: %s Calibration is active, abort suspend (Vnom=%u)\n",
__func__, sr->name, volt_data->volt_nominal);
ret = -EBUSY;
goto finish_suspend;
}
/*
* If the current voltage has not been calibrated, abort the suspend.
*/
if (!volt_data->volt_calibrated) {
pr_warn("%s: %s Calibration hasn't been done, abort suspend (Vnom=%u)\n",
__func__, sr->name, volt_data->volt_nominal);
ret = -EBUSY;
goto finish_suspend;
}
/* To be safe, cancel the calibration work manually */
cancel_delayed_work_sync(&work_data->work);
work_data->work_active = false;
finish_suspend:
mutex_unlock(&omap_dvfs_lock);
return ret;
}
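/*
 * Illustrative sketch, not part of the original source: the handlers above
 * would typically be exposed through the SmartReflex class-data callbacks.
 * The SR_CLASS1P5 constant and the .suspend_noirq field name follow the TI
 * tree's omap_sr_class_data convention but are assumptions here; the other
 * hooks are omitted.
 */
static struct omap_sr_class_data classp5_data_sketch = {
	.class_type	= SR_CLASS1P5,
	.disable	= sr_classp5_disable,
	/* .enable, .configure, .notify hooks omitted */
	.suspend_noirq	= sr_classp5_suspend_noirq,
};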