author		Chris Wilson <chris@chris-wilson.co.uk>		2012-04-24 15:47:41 +0100
committer	Daniel Vetter <daniel.vetter@ffwll.ch>		2012-05-03 11:18:12 +0200
commit		1070a42b6bc5cc10a33d2fe22f0b295a0194a582 (patch)
tree		7cbdeb398d3c957e5d703c7cec4fc55fad7e55f2 /drivers/gpu/drm/i915/i915_gem.c
parent		9797fbfbcfe251b5297b82803002c713e61aa410 (diff)
drm/i915: Move GEM initialisation from i915_dma.c to i915_gem.c
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	64
1 files changed, 64 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9bf11c8fe920..b402d70dc78b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3560,6 +3560,70 @@ cleanup_render_ring:
 	return ret;
 }
 
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+	if (i915_enable_ppgtt >= 0)
+		return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
+int i915_gem_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long gtt_size, mappable_size;
+	int ret;
+
+	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+	mutex_lock(&dev->struct_mutex);
+	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
+		 * aperture accordingly when using aliasing ppgtt. */
+		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
+
+		ret = i915_gem_init_aliasing_ppgtt(dev);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	} else {
+		/* Let GEM Manage all of the aperture.
+		 *
+		 * However, leave one page at the end still bound to the scratch
+		 * page.  There are a number of places where the hardware
+		 * apparently prefetches past the end of the object, and we've
+		 * seen multiple hangs with the GPU head pointer stuck in a
+		 * batchbuffer bound at the last page of the aperture.  One page
+		 * should be enough to keep any prefetching inside of the
+		 * aperture.
+		 */
+		i915_gem_init_global_gtt(dev, 0, mappable_size,
+					 gtt_size);
+	}
+
+	ret = i915_gem_init_hw(dev);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret) {
+		i915_gem_cleanup_aliasing_ppgtt(dev);
+		return ret;
+	}
+
+	/* Allow hardware batchbuffers unless told otherwise. */
+	dev_priv->allow_batchbuffer = 1;
+	return 0;
+}
+
 void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
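For context, a minimal sketch of the caller side that this move implies; it is not part of the diff above, which is limited to i915_gem.c. With GEM initialisation now living in i915_gem_init(), the KMS load path in i915_dma.c would simply delegate to it. The error label and surrounding structure here are illustrative only.

	/* Hypothetical caller sketch (i915_dma.c, KMS path): the inline
	 * GTT/PPGTT setup is replaced by one call into the new GEM entry
	 * point, which takes struct_mutex internally. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_init(dev);
		if (ret)
			goto cleanup_gem_stolen;	/* label name is illustrative */
	}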