Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile           |    3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c     |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |   10
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |   14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         |   53
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c  |  232
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c     |   15
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c         |   37
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    |   58
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c         |   43
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c        |   21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c       |    8
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c      |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c         |   64
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c       |   18
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h  |    5
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c         |   53
17 files changed, 528 insertions(+), 109 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ca7f7646ab5..2e9268da58d8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -38,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
-	  dvo_sil164.o
+	  dvo_sil164.o \
+	  i915_gem_dmabuf.o
 
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index eb2b3c25b9e1..5363e9c66c27 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2032,6 +2032,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d3e194853061..238a52165833 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1012,7 +1012,7 @@ static const struct dev_pm_ops i915_pm_ops = {
 	.restore = i915_pm_resume,
 };
 
-static struct vm_operations_struct i915_gem_vm_ops = {
+static const struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
 	.open = drm_gem_vm_open,
 	.close = drm_gem_vm_close,
@@ -1039,7 +1039,7 @@ static struct drm_driver driver = {
 	 */
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
 	.open = i915_driver_open,
@@ -1062,6 +1062,12 @@ static struct drm_driver driver = {
 	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = i915_gem_prime_export,
+	.gem_prime_import = i915_gem_prime_import,
+
 	.dumb_create = i915_gem_dumb_create,
 	.dumb_map_offset = i915_gem_mmap_gtt,
 	.dumb_destroy = i915_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 470c73219e6b..ccabadd2b6c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -944,6 +944,11 @@ struct drm_i915_gem_object {
 	struct scatterlist *sg_list;
 	int num_sg;
 
+	/* prime dma-buf support */
+	struct sg_table *sg_table;
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
@@ -1251,6 +1256,8 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+				  gfp_t gfpmask);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
@@ -1349,6 +1356,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
 
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				      struct drm_gem_object *gem_obj, int flags);
+
+
 /* i915_gem_gtt.c */
 int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1c08e0900eff..a20ac438b8ef 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
+#include <linux/dma-buf.h>
 
 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
@@ -538,6 +539,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trace_i915_gem_object_pread(obj, args->offset, args->size);
 
 	ret = i915_gem_shmem_pread(dev, obj, args, file);
@@ -880,6 +889,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
 	ret = -EFAULT;
@@ -1021,6 +1038,14 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
+	/* prime objects have no backing filp to GEM mmap
+	 * pages from.
+	 */
+	if (!obj->filp) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EINVAL;
+	}
+
 	addr = vm_mmap(obj->filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
@@ -1302,8 +1327,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-
-static int
+int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 			      gfp_t gfpmask)
 {
@@ -1312,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 	struct inode *inode;
 	struct page *page;
 
+	if (obj->pages || obj->sg_table)
+		return 0;
+
 	/* Get the list of pages out of our struct file.  They'll be pinned
 	 * at this point until we release them.
 	 */
@@ -1353,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int i;
 
+	if (!obj->pages)
+		return;
+
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2164,10 +2194,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	if (obj->gtt_space == NULL)
 		return 0;
 
-	if (obj->pin_count != 0) {
-		DRM_ERROR("Attempting to unbind pinned buffer\n");
-		return -EINVAL;
-	}
+	if (obj->pin_count)
+		return -EBUSY;
 
 	ret = i915_gem_object_finish_gpu(obj);
 	if (ret)
@@ -3394,6 +3422,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
+	u32 mask;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
@@ -3404,8 +3433,15 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
+	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+		/* 965gm cannot relocate objects above 4GiB. */
+		mask &= ~__GFP_HIGHMEM;
+		mask |= __GFP_DMA32;
+	}
+
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+	mapping_set_gfp_mask(mapping, mask);
 
 	i915_gem_info_add_obj(dev_priv, size);
 
@@ -3458,6 +3494,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg_table);
+
 	if (obj->phys_obj)
 		i915_gem_detach_phys_object(dev, obj);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 000000000000..aa308e1337db
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "i915_drv.h"
+#include <linux/dma-buf.h>
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+					     enum dma_data_direction dir)
+{
+	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int npages = obj->base.size / PAGE_SIZE;
+	struct sg_table *sg = NULL;
+	int ret;
+	int nents;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret)
+			goto out;
+	}
+
+	/* link the pages into an SG then map the sg */
+	sg = drm_prime_pages_to_sg(obj->pages, npages);
+	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+out:
+	mutex_unlock(&dev->struct_mutex);
+	return sg;
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				   struct sg_table *sg, enum dma_data_direction dir)
+{
+	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+
+	if (obj->base.export_dma_buf == dma_buf) {
+		/* drop the reference on the export fd holds */
+		obj->base.export_dma_buf = NULL;
+		drm_gem_object_unreference_unlocked(&obj->base);
+	}
+}
+
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+
+	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+	if (!obj->dma_buf_vmapping) {
+		DRM_ERROR("failed to vmap object\n");
+		goto out_unlock;
+	}
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	--obj->vmapping_count;
+	if (obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static const struct dma_buf_ops i915_dmabuf_ops = {
+	.map_dma_buf = i915_gem_map_dma_buf,
+	.unmap_dma_buf = i915_gem_unmap_dma_buf,
+	.release = i915_gem_dmabuf_release,
+	.kmap = i915_gem_dmabuf_kmap,
+	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+	.kunmap = i915_gem_dmabuf_kunmap,
+	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				      struct drm_gem_object *gem_obj, int flags)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+	return dma_buf_export(obj, &i915_dmabuf_ops,
+			      obj->base.size, 0600);
+}
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct drm_i915_gem_object *obj;
+	int npages;
+	int size;
+	int ret;
+
+	/* is this one of own objects? */
+	if (dma_buf->ops == &i915_dmabuf_ops) {
+		obj = dma_buf->priv;
+		/* is it from our device? */
+		if (obj->base.dev == dev) {
+			drm_gem_object_reference(&obj->base);
+			return &obj->base;
+		}
+	}
+
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	size = dma_buf->size;
+	npages = size / PAGE_SIZE;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (obj == NULL) {
+		ret = -ENOMEM;
+		goto fail_unmap;
+	}
+
+	ret = drm_gem_private_object_init(dev, &obj->base, size);
+	if (ret) {
+		kfree(obj);
+		goto fail_unmap;
+	}
+
+	obj->sg_table = sg;
+	obj->base.import_attach = attach;
+
+	return &obj->base;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 29d573c27b35..9fd25a435536 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -267,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 		BUG();
 	}
 
-	if (dev_priv->mm.gtt->needs_dmar) {
+	if (obj->sg_table) {
+		i915_ppgtt_insert_sg_entries(ppgtt,
+					     obj->sg_table->sgl,
+					     obj->sg_table->nents,
+					     obj->gtt_space->start >> PAGE_SHIFT,
+					     pte_flags);
+	} else if (dev_priv->mm.gtt->needs_dmar) {
 		BUG_ON(!obj->sg_list);
 
 		i915_ppgtt_insert_sg_entries(ppgtt,
@@ -371,7 +377,12 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
 
-	if (dev_priv->mm.gtt->needs_dmar) {
+	if (obj->sg_table) {
+		intel_gtt_insert_sg_entries(obj->sg_table->sgl,
+					    obj->sg_table->nents,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    agp_type);
+	} else if (dev_priv->mm.gtt->needs_dmar) {
 		BUG_ON(!obj->sg_list);
 
 		intel_gtt_insert_sg_entries(obj->sg_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6553dcc2ca79..0e876646d769 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps_work);
-	u8 new_delay = dev_priv->cur_delay;
 	u32 pm_iir, pm_imr;
+	u8 new_delay;
 
 	spin_lock_irq(&dev_priv->rps_lock);
 	pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	I915_WRITE(GEN6_PMIMR, 0);
 	spin_unlock_irq(&dev_priv->rps_lock);
 
-	if (!pm_iir)
+	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
 
 	mutex_lock(&dev_priv->dev->struct_mutex);
-	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		if (dev_priv->cur_delay != dev_priv->max_delay)
-			new_delay = dev_priv->cur_delay + 1;
-		if (new_delay > dev_priv->max_delay)
-			new_delay = dev_priv->max_delay;
-	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-		gen6_gt_force_wake_get(dev_priv);
-		if (dev_priv->cur_delay != dev_priv->min_delay)
-			new_delay = dev_priv->cur_delay - 1;
-		if (new_delay < dev_priv->min_delay) {
-			new_delay = dev_priv->min_delay;
-			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-				   ((new_delay << 16) & 0x3f0000));
-		} else {
-			/* Make sure we continue to get down interrupts
-			 * until we hit the minimum frequency */
-			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-		}
-		gen6_gt_force_wake_put(dev_priv);
-	}
+
+	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+		new_delay = dev_priv->cur_delay + 1;
+	else
+		new_delay = dev_priv->cur_delay - 1;
 
 	gen6_set_rps(dev_priv->dev, new_delay);
-	dev_priv->cur_delay = new_delay;
 
-	/*
-	 * rps_lock not held here because clearing is non-destructive. There is
-	 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-	 * by holding struct_mutex for the duration of the write.
-	 */
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1d801724c1db..9f5148acf73c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -910,9 +910,10 @@ static void assert_pll(struct drm_i915_private *dev_priv,
 
 /* For ILK+ */
 static void assert_pch_pll(struct drm_i915_private *dev_priv,
-			   struct intel_crtc *intel_crtc, bool state)
+			   struct intel_pch_pll *pll,
+			   struct intel_crtc *crtc,
+			   bool state)
 {
-	int reg;
 	u32 val;
 	bool cur_state;
 
@@ -921,30 +922,37 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	if (!intel_crtc->pch_pll) {
-		WARN(1, "asserting PCH PLL enabled with no PLL\n");
+	if (WARN (!pll,
+		  "asserting PCH PLL %s with no PLL\n", state_string(state)))
 		return;
-	}
 
-	if (HAS_PCH_CPT(dev_priv->dev)) {
+	val = I915_READ(pll->pll_reg);
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	WARN(cur_state != state,
+	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+	     pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+	/* Make sure the selected PLL is correctly attached to the transcoder */
+	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
 		u32 pch_dpll;
 
 		pch_dpll = I915_READ(PCH_DPLL_SEL);
-
-		/* Make sure the selected PLL is enabled to the transcoder */
-		WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
-		     "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+		cur_state = pll->pll_reg == _PCH_DPLL_B;
+		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+			  "PLL[%d] not attached to this transcoder %d: %08x\n",
+			  cur_state, crtc->pipe, pch_dpll)) {
+			cur_state = !!(val >> (4*crtc->pipe + 3));
+			WARN(cur_state != state,
+			     "PLL[%d] not %s on this transcoder %d: %08x\n",
+			     pll->pll_reg == _PCH_DPLL_B,
+			     state_string(state),
+			     crtc->pipe,
+			     val);
+		}
 	}
-
-	reg = intel_crtc->pch_pll->pll_reg;
-	val = I915_READ(reg);
-	cur_state = !!(val & DPLL_VCO_ENABLE);
-	WARN(cur_state != state,
-	     "PCH PLL state assertion failure (expected %s, current %s)\n",
-	     state_string(state), state_string(cur_state));
 }
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 			  enum pipe pipe, bool state)
@@ -1424,7 +1432,7 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
 	assert_pch_refclk_enabled(dev_priv);
 
 	if (pll->active++ && pll->on) {
-		assert_pch_pll_enabled(dev_priv, intel_crtc);
+		assert_pch_pll_enabled(dev_priv, pll, NULL);
 		return;
 	}
@@ -1460,12 +1468,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
 		      intel_crtc->base.base.id);
 
 	if (WARN_ON(pll->active == 0)) {
-		assert_pch_pll_disabled(dev_priv, intel_crtc);
+		assert_pch_pll_disabled(dev_priv, pll, NULL);
 		return;
 	}
 
 	if (--pll->active) {
-		assert_pch_pll_enabled(dev_priv, intel_crtc);
+		assert_pch_pll_enabled(dev_priv, pll, NULL);
 		return;
 	}
@@ -1495,7 +1503,9 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
 	BUG_ON(dev_priv->info->gen < 5);
 
 	/* Make sure PCH DPLL is enabled */
-	assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+	assert_pch_pll_enabled(dev_priv,
+			       to_intel_crtc(crtc)->pch_pll,
+			       to_intel_crtc(crtc));
 
 	/* FDI must be feeding us bits for PCH ports */
 	assert_fdi_tx_enabled(dev_priv, pipe);
@@ -6918,7 +6928,7 @@ void intel_modeset_init(struct drm_device *dev)
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
 
-	dev->mode_config.funcs = (void *)&intel_mode_funcs;
+	dev->mode_config.funcs = &intel_mode_funcs;
 
 	intel_init_quirks(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9b2effcc90e5..c71e7890e6f6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -266,6 +266,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_H_ILLEGAL;
+
 	return MODE_OK;
 }
@@ -702,6 +705,9 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		mode->clock = intel_dp->panel_fixed_mode->clock;
 	}
 
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return false;
+
 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
 		      "max bw %02x pixel clock %iKHz\n",
 		      max_lane_count, bws[max_clock], mode->clock);
@@ -1154,11 +1160,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
 	DRM_DEBUG_KMS("Turn eDP power off\n");
 
-	WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
-	ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
 	pp = ironlake_get_pp_control(dev_priv);
-	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
@@ -1266,18 +1271,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+	/* Make sure the panel is off before trying to change the mode. But also
+	 * ensure that we have vdd while we switch off the panel.
+	 */
+	ironlake_edp_panel_vdd_on(intel_dp);
 	ironlake_edp_backlight_off(intel_dp);
 	ironlake_edp_panel_off(intel_dp);
-
-	/* Wake up the sink first */
-	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	intel_dp_link_down(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, false);
-
-	/* Make sure the panel is off before trying to
-	 * change the mode
-	 */
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1309,10 +1312,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
 	if (mode != DRM_MODE_DPMS_ON) {
+		/* Switching the panel off requires vdd. */
+		ironlake_edp_panel_vdd_on(intel_dp);
 		ironlake_edp_backlight_off(intel_dp);
 		ironlake_edp_panel_off(intel_dp);
 
-		ironlake_edp_panel_vdd_on(intel_dp);
 		intel_dp_sink_dpms(intel_dp, mode);
 		intel_dp_link_down(intel_dp);
 		ironlake_edp_panel_vdd_off(intel_dp, false);
@@ -1961,6 +1965,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	return false;
 }
 
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+	u8 buf[3];
+
+	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+}
+
 static bool
 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
@@ -2142,6 +2163,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 	if (status != connector_status_connected)
 		return status;
 
+	intel_dp_probe_oui(intel_dp);
+
 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
 		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
 	} else {
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 4a9707dd0f9c..1991a4408cf9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -396,11 +396,22 @@ clear_err:
 	 * Wait for bus to IDLE before clearing NAK.
 	 * If we clear the NAK while bus is still active, then it will stay
 	 * active and the next transaction may fail.
+	 *
+	 * If no ACK is received during the address phase of a transaction, the
+	 * adapter must report -ENXIO. It is not clear what to return if no ACK
+	 * is received at other times. But we have to be careful to not return
+	 * spurious -ENXIO because that will prevent i2c and drm edid functions
+	 * from retrying. So return -ENXIO only when gmbus properly quiescents -
+	 * timing out seems to happen when there _is_ a ddc chip present, but
+	 * it's slow responding and only answers on the 2nd retry.
 	 */
+	ret = -ENXIO;
 	if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-		     10))
+		     10)) {
 		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
 			      adapter->name);
+		ret = -ETIMEDOUT;
+	}
 
 	/* Toggle the Software Clear Interrupt bit. This has the effect
 	 * of resetting the GMBUS controller and so clearing the
@@ -414,14 +425,6 @@ clear_err:
 			 adapter->name, msgs[i].addr,
 			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
-	/*
-	 * If no ACK is received during the address phase of a transaction,
-	 * the adapter must report -ENXIO.
-	 * It is not clear what to return if no ACK is received at other times.
-	 * So, we always return -ENXIO in all NAK cases, to ensure we send
-	 * it at least during the one case that is specified.
-	 */
-	ret = -ENXIO;
 	goto out;
 
 timeout:
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9dee82350def..08eb04c787e8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -747,6 +747,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
 	},
 	{
 		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Hewlett-Packard HP t5740e Thin Client",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
 		.ident = "Hewlett-Packard t5745",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 2b2e011e9055..2a1625d84a69 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -368,6 +368,7 @@ int intel_panel_setup_backlight(struct drm_device *dev)
 	else
 		return -ENODEV;
 
+	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = intel_panel_get_max_backlight(dev);
 	dev_priv->backlight =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8e79ff67ec98..d0ce2a5b1d3f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2270,10 +2270,33 @@ void ironlake_disable_drps(struct drm_device *dev)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 swreq;
+	u32 limits;
 
-	swreq = (val & 0x3ff) << 25;
-	I915_WRITE(GEN6_RPNSWREQ, swreq);
+	limits = 0;
+	if (val >= dev_priv->max_delay)
+		val = dev_priv->max_delay;
+	else
+		limits |= dev_priv->max_delay << 24;
+
+	if (val <= dev_priv->min_delay)
+		val = dev_priv->min_delay;
+	else
+		limits |= dev_priv->min_delay << 16;
+
+	if (val == dev_priv->cur_delay)
+		return;
+
+	I915_WRITE(GEN6_RPNSWREQ,
+		   GEN6_FREQUENCY(val) |
+		   GEN6_OFFSET(0) |
+		   GEN6_AGGRESSIVE_TURBO);
+
+	/* Make sure we continue to get interrupts
+	 * until we hit the minimum or maximum frequencies.
+	 */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+	dev_priv->cur_delay = val;
 }
 
 void gen6_disable_rps(struct drm_device *dev)
@@ -2327,11 +2350,10 @@ int intel_enable_rc6(const struct drm_device *dev)
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
 	struct intel_ring_buffer *ring;
-	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+	u32 rp_state_cap;
+	u32 gt_perf_status;
 	u32 pcu_mbox, rc6_mask = 0;
 	u32 gtfifodbg;
-	int cur_freq, min_freq, max_freq;
 	int rc6_mode;
 	int i;
 
@@ -2352,6 +2374,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
 	gen6_gt_force_wake_get(dev_priv);
 
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+	/* In units of 100MHz */
+	dev_priv->max_delay = rp_state_cap & 0xff;
+	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->cur_delay = 0;
+
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
@@ -2399,8 +2429,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   18 << 24 |
-		   6 << 16);
+		   dev_priv->max_delay << 24 |
+		   dev_priv->min_delay << 16);
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
 	I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2408,7 +2438,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_MODE |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
 		   GEN6_RP_MEDIA_IS_GFX |
 		   GEN6_RP_ENABLE |
 		   GEN6_RP_UP_BUSY_AVG |
@@ -2426,10 +2456,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
-	min_freq = (rp_state_cap & 0xff0000) >> 16;
-	max_freq = rp_state_cap & 0xff;
-	cur_freq = (gt_perf_status & 0xff00) >> 8;
-
 	/* Check for overclock support */
 	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 		     500))
@@ -2440,14 +2466,11 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 	if (pcu_mbox & (1<<31)) { /* OC supported */
-		max_freq = pcu_mbox & 0xff;
+		dev_priv->max_delay = pcu_mbox & 0xff;
 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 	}
 
-	/* In units of 100MHz */
-	dev_priv->max_delay = max_freq;
-	dev_priv->min_delay = min_freq;
-	dev_priv->cur_delay = cur_freq;
+	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER,
@@ -3580,8 +3603,9 @@ static void gen6_sanitize_pm(struct drm_device *dev)
 		limits |= (dev_priv->min_delay & 0x3f) << 16;
 
 	if (old != limits) {
-		DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
-			  limits, old);
+		/* Note that the known failure case is to read back 0. */
+		DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+				 "expected %08x, was %08x\n", limits, old);
 		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 	}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ca3c6e128594..2f5106a488c5 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -780,10 +780,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
 		((v_sync_len & 0x30) >> 4);
 
 	dtd->part2.dtd_flags = 0x18;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
 	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-		dtd->part2.dtd_flags |= 0x2;
+		dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-		dtd->part2.dtd_flags |= 0x4;
+		dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
 	dtd->part2.sdvo_flags = 0;
 	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -817,9 +819,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
 	mode->clock = dtd->part1.clock * 10;
 
 	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-	if (dtd->part2.dtd_flags & 0x2)
+	if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
 		mode->flags |= DRM_MODE_FLAG_PHSYNC;
-	if (dtd->part2.dtd_flags & 0x4)
+	if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
 		mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
@@ -1246,8 +1250,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
 
 static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
 {
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	u8 response[2];
 
+	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+	 * on the line. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		return false;
+
 	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
 				    &response, 2) && response[0];
 }
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 6b7b22f4d63e..9d030142ee43 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
 	u16 output_flags;
 } __attribute__((packed));
 
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
 /** This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
 	struct {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 3346612d2953..a233a51fd7e6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -674,6 +674,54 @@ static const struct tv_mode tv_modes[] = {
 		.filter_table = filter_table,
 	},
 	{
+		.name = "480p",
+		.clock = 107520,
+		.refresh = 59940,
+		.oversample = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end = 64, .hblank_end = 122,
+		.hblank_start = 842, .htotal = 857,
+
+		.progressive = true, .trilevel_sync = false,
+
+		.vsync_start_f1 = 12, .vsync_start_f2 = 12,
+		.vsync_len = 12,
+
+		.veq_ena = false,
+
+		.vi_end_f1 = 44, .vi_end_f2 = 44,
+		.nbr_end = 479,
+
+		.burst_ena = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name = "576p",
+		.clock = 107520,
+		.refresh = 50000,
+		.oversample = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end = 64, .hblank_end = 139,
+		.hblank_start = 859, .htotal = 863,
+
+		.progressive = true, .trilevel_sync = false,
+
+		.vsync_start_f1 = 10, .vsync_start_f2 = 10,
+		.vsync_len = 10,
+
+		.veq_ena = false,
+
+		.vi_end_f1 = 48, .vi_end_f2 = 48,
+		.nbr_end = 575,
+
+		.burst_ena = false,
+
+		.filter_table = filter_table,
+	},
+	{
 		.name = "720p@60Hz",
 		.clock = 148800,
 		.refresh = 60000,
@@ -1194,6 +1242,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 
 	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
 	I915_WRITE(TV_CTL, save_tv_ctl);
+	POSTING_READ(TV_CTL);
+
+	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
 	/* Restore interrupt config */
 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {