author     Deepak Nibade <dnibade@nvidia.com>  2014-07-28 18:17:07 +0530
committer  Matthew Pedro <mapedro@nvidia.com>  2014-08-04 10:58:09 -0700
commit     0ef2d043a94be1637533829afee2d6a3427201e1 (patch)
tree       b30cec6a35c455a97469590535d6160bb88540db /drivers/gpu
parent     109cfcf5fbedd29ecc4fe6b2951d4efbc1ec48ec (diff)
gpu: nvgpu: fix error handling for mutex_acquire()
Currently, if pmu_mutex_acquire() fails, we disable ELPG and move ahead.
But it is not clear why disabling ELPG is required in the case where we
fail to acquire the mutex. Hence, skip disabling ELPG if mutex_acquire()
fails.

Bug 1533644

Change-Id: I7e8e99a701d0ba071eb31ac17582b04072ee55eb
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/448131
(cherry picked from commit 789c256dd74e2b2b0481e25b2af1b2202ea6f582)
Reviewed-on: http://git-master/r/450268
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Naveen Kumar S <nkumars@nvidia.com>
Reviewed-by: Matthew Pedro <mapedro@nvidia.com>
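For reference, below is a minimal sketch of the locking pattern this change leaves at each call site, assuming only the functions visible in the diff (pmu_mutex_acquire(), pmu_mutex_release(), PMU_MUTEX_ID_FIFO); fifo_op_with_pmu_mutex() and do_fifo_work() are hypothetical placeholders, not driver source:

/*
 * Sketch only: try to acquire the PMU FIFO mutex, do the work regardless,
 * and release the mutex only when the earlier acquire actually succeeded.
 * ELPG is no longer touched on the acquire-failure path.
 */
static int fifo_op_with_pmu_mutex(struct gk20a *g)
{
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	u32 mutex_ret;
	int err;

	/* non-zero return means the PMU mutex could not be acquired */
	mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	err = do_fifo_work(g);	/* hypothetical: the actual FIFO accesses */

	/* release only if the acquire above succeeded */
	if (!mutex_ret)
		pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	return err;
}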
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c  48
1 file changed, 12 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 0f4ad8866de9..a5174b4a30bc 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1513,7 +1513,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
u32 delay = GR_IDLE_CHECK_DEFAULT;
u32 ret = 0;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
- u32 elpg_off = 0;
+ u32 mutex_ret = 0;
u32 i;
gk20a_dbg_fn("%d", hw_chid);
@@ -1522,10 +1522,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
for (i = 0; i < g->fifo.max_runlists; i++)
mutex_lock(&f->runlist_info[i].mutex);
- /* disable elpg if failed to acquire pmu mutex */
- elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
- if (elpg_off)
- gk20a_pmu_disable_elpg(g);
+ mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
/* issue preempt */
gk20a_writel(g, fifo_preempt_r(),
@@ -1557,10 +1554,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
gk20a_fifo_recover_ch(g, hw_chid, true);
}
- /* re-enable elpg or release pmu mutex */
- if (elpg_off)
- gk20a_pmu_enable_elpg(g);
- else
+ if (!mutex_ret)
pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
for (i = 0; i < g->fifo.max_runlists; i++)
@@ -1573,24 +1567,18 @@ int gk20a_fifo_enable_engine_activity(struct gk20a *g,
struct fifo_engine_info_gk20a *eng_info)
{
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
- u32 elpg_off;
+ u32 mutex_ret;
u32 enable;
gk20a_dbg_fn("");
- /* disable elpg if failed to acquire pmu mutex */
- elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
- if (elpg_off)
- gk20a_pmu_disable_elpg(g);
+ mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
enable = gk20a_readl(g, fifo_sched_disable_r());
enable &= ~(fifo_sched_disable_true_v() >> eng_info->runlist_id);
gk20a_writel(g, fifo_sched_disable_r(), enable);
- /* re-enable elpg or release pmu mutex */
- if (elpg_off)
- gk20a_pmu_enable_elpg(g);
- else
+ if (!mutex_ret)
pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
gk20a_dbg_fn("done");
@@ -1604,7 +1592,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat;
u32 pbdma_chid = ~0, engine_chid = ~0, disable;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
- u32 elpg_off;
+ u32 mutex_ret;
u32 err = 0;
gk20a_dbg_fn("");
@@ -1615,10 +1603,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
fifo_engine_status_engine_busy_v() && !wait_for_idle)
return -EBUSY;
- /* disable elpg if failed to acquire pmu mutex */
- elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
- if (elpg_off)
- gk20a_pmu_disable_elpg(g);
+ mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
disable = gk20a_readl(g, fifo_sched_disable_r());
disable = set_field(disable,
@@ -1660,10 +1645,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
}
clean_up:
- /* re-enable elpg or release pmu mutex */
- if (elpg_off)
- gk20a_pmu_enable_elpg(g);
- else
+ if (!mutex_ret)
pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
if (err) {
@@ -1826,25 +1808,19 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
struct fifo_runlist_info_gk20a *runlist = NULL;
struct fifo_gk20a *f = &g->fifo;
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
- u32 elpg_off;
+ u32 mutex_ret;
u32 ret = 0;
runlist = &f->runlist_info[runlist_id];
mutex_lock(&runlist->mutex);
- /* disable elpg if failed to acquire pmu mutex */
- elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
- if (elpg_off)
- gk20a_pmu_disable_elpg(g);
+ mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
wait_for_finish);
- /* re-enable elpg or release pmu mutex */
- if (elpg_off)
- gk20a_pmu_enable_elpg(g);
- else
+ if (!mutex_ret)
pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
mutex_unlock(&runlist->mutex);