#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
- #include <linux/of_device.h>
+ #include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
/* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
if (dw_hdmi_support_scdc(hdmi, display)) {
if (mtmdsclock > HDMI14_MAX_TMDSCLK)
- drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 1);
+ drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 1);
else
- drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 0);
+ drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 0);
}
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);
min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
/* Enable Scrambling in the Sink */
- drm_scdc_set_scrambling(&hdmi->connector, 1);
+ drm_scdc_set_scrambling(hdmi->curr_conn, 1);
/*
* To activate the scrambler feature, you must ensure
hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
HDMI_MC_SWRSTZ);
- drm_scdc_set_scrambling(&hdmi->connector, 0);
+ drm_scdc_set_scrambling(hdmi->curr_conn, 0);
}
}
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.interlace_allowed = true;
+ hdmi->bridge.ddc = hdmi->ddc;
#ifdef CONFIG_OF
hdmi->bridge.of_node = pdev->dev.of_node;
#endif
if (!obj->resv)
obj->resv = &obj->_resv;
+ if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
+ drm_gem_gpuva_init(obj);
+
drm_vma_node_reset(&obj->vma_node);
INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
/*
- * Move pages to appropriate lru and release the pagevec, decrementing the
- * ref count of those pages.
+ * Move folios to appropriate lru and release the folios, decrementing the
+ * ref count of those folios.
*/
-static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
- check_move_unevictable_pages(pvec);
- __pagevec_release(pvec);
+ check_move_unevictable_folios(fbatch);
+ __folio_batch_release(fbatch);
cond_resched();
}
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
struct address_space *mapping;
- struct page *p, **pages;
- struct pagevec pvec;
- int i, npages;
-
+ struct page **pages;
+ struct folio *folio;
+ struct folio_batch fbatch;
+ int i, j, npages;
if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL);
mapping_set_unevictable(mapping);
- for (i = 0; i < npages; i++) {
- p = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(p))
+ i = 0;
+ while (i < npages) {
+ folio = shmem_read_folio_gfp(mapping, i,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
goto fail;
- pages[i] = p;
+ for (j = 0; j < folio_nr_pages(folio); j++, i++)
+ pages[i] = folio_file_page(folio, i);
/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
* correct region during swapin. Note that this requires
* __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
* so shmem can relocate pages during swapin if required.
*/
BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
- (page_to_pfn(p) >= 0x00100000UL));
+ (folio_pfn(folio) >= 0x00100000UL));
}
return pages;
fail:
mapping_clear_unevictable(mapping);
- pagevec_init(&pvec);
- while (i--) {
- if (!pagevec_add(&pvec, pages[i]))
- drm_gem_check_release_pagevec(&pvec);
+ folio_batch_init(&fbatch);
+ j = 0;
+ while (j < i) {
+ struct folio *f = page_folio(pages[j]);
+ if (!folio_batch_add(&fbatch, f))
+ drm_gem_check_release_batch(&fbatch);
+ j += folio_nr_pages(f);
}
- if (pagevec_count(&pvec))
- drm_gem_check_release_pagevec(&pvec);
+ if (fbatch.nr)
+ drm_gem_check_release_batch(&fbatch);
kvfree(pages);
- return ERR_CAST(p);
+ return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);
{
int i, npages;
struct address_space *mapping;
- struct pagevec pvec;
+ struct folio_batch fbatch;
mapping = file_inode(obj->filp)->i_mapping;
mapping_clear_unevictable(mapping);
npages = obj->size >> PAGE_SHIFT;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
for (i = 0; i < npages; i++) {
+ struct folio *folio;
+
if (!pages[i])
continue;
+ folio = page_folio(pages[i]);
if (dirty)
- set_page_dirty(pages[i]);
+ folio_mark_dirty(folio);
if (accessed)
- mark_page_accessed(pages[i]);
+ folio_mark_accessed(folio);
/* Undo the reference we took when populating the table */
- if (!pagevec_add(&pvec, pages[i]))
- drm_gem_check_release_pagevec(&pvec);
+ if (!folio_batch_add(&fbatch, folio))
+ drm_gem_check_release_batch(&fbatch);
+ i += folio_nr_pages(folio) - 1;
}
- if (pagevec_count(&pvec))
- drm_gem_check_release_pagevec(&pvec);
+ if (folio_batch_count(&fbatch))
+ drm_gem_check_release_batch(&fbatch);
kvfree(pages);
}
#include <linux/device.h>
#include <linux/module.h>
+ #include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
device_set_node(&dsi->dev, of_fwnode_handle(info->node));
dsi->channel = info->channel;
- strlcpy(dsi->name, info->type, sizeof(dsi->name));
+ strscpy(dsi->name, info->type, sizeof(dsi->name));
ret = mipi_dsi_device_add(dsi);
if (ret) {
* requirement is inherited from the wait-before-signal behavior required by
* the Vulkan timeline semaphore API.
*
+ * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without
+ * blocking: an eventfd will be signaled when the syncobj is. This is useful to
+ * integrate the wait in an event loop.
+ *
*
* Import/export of syncobjs
* -------------------------
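/*
 * A minimal userspace sketch of the eventfd-based wait described above,
 * assuming headers new enough to carry the uAPI added here (struct
 * drm_syncobj_eventfd, DRM_IOCTL_SYNCOBJ_EVENTFD) and libdrm's drmIoctl().
 * The helper name and the blocking poll() stand in for a real event loop
 * and are purely illustrative. Note that the handler added below requires
 * the driver to advertise DRIVER_SYNCOBJ_TIMELINE even for binary use.
 */
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <xf86drm.h>

static int syncobj_wait_via_eventfd(int drm_fd, uint32_t handle, uint64_t point)
{
	struct drm_syncobj_eventfd args;
	struct pollfd pfd;
	uint64_t count;
	int ev_fd;

	ev_fd = eventfd(0, EFD_CLOEXEC);
	if (ev_fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.fd = ev_fd;
	args.point = point;	/* 0 for a binary syncobj */
	args.flags = 0;		/* or DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE */

	if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args) != 0) {
		close(ev_fd);
		return -1;
	}

	/* The eventfd becomes readable once the syncobj's fence signals. */
	pfd.fd = ev_fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
		(void)read(ev_fd, &count, sizeof(count));

	close(ev_fd);
	return 0;
}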
#include <linux/anon_inodes.h>
#include <linux/dma-fence-unwrap.h>
+ #include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait);
+ struct syncobj_eventfd_entry {
+ struct list_head node;
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+ struct drm_syncobj *syncobj;
+ struct eventfd_ctx *ev_fd_ctx;
+ u64 point;
+ u32 flags;
+ };
+
+ static void
+ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+ struct syncobj_eventfd_entry *entry);
+
/**
* drm_syncobj_find - lookup and reference a sync object.
* @file_private: drm file private pointer
spin_unlock(&syncobj->lock);
}
+ static void
+ syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry)
+ {
+ eventfd_ctx_put(entry->ev_fd_ctx);
+ dma_fence_put(entry->fence);
+ /* This happens either inside the syncobj lock, or after the node has
+ * already been removed from the list.
+ */
+ list_del(&entry->node);
+ kfree(entry);
+ }
+
+ static void
+ drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
+ struct syncobj_eventfd_entry *entry)
+ {
+ spin_lock(&syncobj->lock);
+ list_add_tail(&entry->node, &syncobj->ev_fd_list);
+ syncobj_eventfd_entry_func(syncobj, entry);
+ spin_unlock(&syncobj->lock);
+ }
+
/**
* drm_syncobj_add_point - add new timeline point to the syncobj
* @syncobj: sync object to add timeline point do
struct dma_fence *fence,
uint64_t point)
{
- struct syncobj_wait_entry *cur, *tmp;
+ struct syncobj_wait_entry *wait_cur, *wait_tmp;
+ struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
struct dma_fence *prev;
dma_fence_get(fence);
dma_fence_chain_init(chain, prev, fence, point);
rcu_assign_pointer(syncobj->fence, &chain->base);
- list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
- syncobj_wait_syncobj_func(syncobj, cur);
+ list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+ syncobj_wait_syncobj_func(syncobj, wait_cur);
+ list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+ syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
spin_unlock(&syncobj->lock);
/* Walk the chain once to trigger garbage collection */
struct dma_fence *fence)
{
struct dma_fence *old_fence;
- struct syncobj_wait_entry *cur, *tmp;
+ struct syncobj_wait_entry *wait_cur, *wait_tmp;
+ struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
if (fence)
dma_fence_get(fence);
rcu_assign_pointer(syncobj->fence, fence);
if (fence != old_fence) {
- list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
- syncobj_wait_syncobj_func(syncobj, cur);
+ list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+ syncobj_wait_syncobj_func(syncobj, wait_cur);
+ list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+ syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
}
spin_unlock(&syncobj->lock);
*/
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
- struct dma_fence *fence = dma_fence_allocate_private_stub();
+ struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
- if (IS_ERR(fence))
- return PTR_ERR(fence);
+ if (!fence)
+ return -ENOMEM;
drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
+ struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
+
drm_syncobj_replace_fence(syncobj, NULL);
+
+ list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+ syncobj_eventfd_entry_free(ev_fd_cur);
+
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
kref_init(&syncobj->refcount);
INIT_LIST_HEAD(&syncobj->cb_list);
+ INIT_LIST_HEAD(&syncobj->ev_fd_list);
spin_lock_init(&syncobj->lock);
if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
return ret;
}
+ static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+ {
+ struct syncobj_eventfd_entry *entry =
+ container_of(cb, struct syncobj_eventfd_entry, fence_cb);
+
+ eventfd_signal(entry->ev_fd_ctx, 1);
+ syncobj_eventfd_entry_free(entry);
+ }
+
+ static void
+ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+ struct syncobj_eventfd_entry *entry)
+ {
+ int ret;
+ struct dma_fence *fence;
+
+ /* This happens inside the syncobj lock */
+ fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+ if (!fence)
+ return;
+ ret = dma_fence_chain_find_seqno(&fence, entry->point);
+ if (ret != 0) {
+ dma_fence_put(fence);
+ return;
+ } else if (!fence) {
+ /* Point already signaled and collected from the chain; use a stub fence. */
+ fence = dma_fence_get_stub();
+ }
+
+ list_del_init(&entry->node);
+ entry->fence = fence;
+
+ if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
+ eventfd_signal(entry->ev_fd_ctx, 1);
+ syncobj_eventfd_entry_free(entry);
+ } else {
+ ret = dma_fence_add_callback(fence, &entry->fence_cb,
+ syncobj_eventfd_entry_fence_func);
+ if (ret == -ENOENT) {
+ eventfd_signal(entry->ev_fd_ctx, 1);
+ syncobj_eventfd_entry_free(entry);
+ }
+ }
+ }
+
+ int
+ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+ {
+ struct drm_syncobj_eventfd *args = data;
+ struct drm_syncobj *syncobj;
+ struct eventfd_ctx *ev_fd_ctx;
+ struct syncobj_eventfd_entry *entry;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)
+ return -EINVAL;
+
+ if (args->pad)
+ return -EINVAL;
+
+ syncobj = drm_syncobj_find(file_private, args->handle);
+ if (!syncobj)
+ return -ENOENT;
+
+ ev_fd_ctx = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(ev_fd_ctx)) {
+ /* Drop the reference taken by drm_syncobj_find() on failure. */
+ drm_syncobj_put(syncobj);
+ return PTR_ERR(ev_fd_ctx);
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ eventfd_ctx_put(ev_fd_ctx);
+ drm_syncobj_put(syncobj);
+ return -ENOMEM;
+ }
+ entry->syncobj = syncobj;
+ entry->ev_fd_ctx = ev_fd_ctx;
+ entry->point = args->point;
+ entry->flags = args->flags;
+
+ drm_syncobj_add_eventfd(syncobj, entry);
+ drm_syncobj_put(syncobj);
+
+ return 0;
+ }
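/*
 * Not shown in this hunk: the new handler also has to be reachable from
 * userspace via the core ioctl table. A hedged sketch of the expected
 * drm_ioctls[] addition in drm_ioctl.c, assuming the same DRM_RENDER_ALLOW
 * policy as the other syncobj ioctls:
 *
 *	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_EVENTFD, drm_syncobj_eventfd_ioctl,
 *		      DRM_RENDER_ALLOW),
 */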
int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
- #include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_connector.h>
static const struct drm_display_mode starry_ili9882t_default_mode = {
.clock = 165280,
.hdisplay = 1200,
- .hsync_start = 1200 + 32,
- .hsync_end = 1200 + 32 + 30,
- .htotal = 1200 + 32 + 30 + 32,
+ .hsync_start = 1200 + 72,
+ .hsync_end = 1200 + 72 + 30,
+ .htotal = 1200 + 72 + 30 + 72,
.vdisplay = 1920,
.vsync_start = 1920 + 68,
.vsync_end = 1920 + 68 + 2,
bool prepared;
- ktime_t prepared_time;
ktime_t unprepared_time;
const struct panel_desc *desc;
if (p->desc->delay.prepare)
msleep(p->desc->delay.prepare);
- p->prepared_time = ktime_get_boottime();
-
return 0;
}
return -ENOMEM;
panel->enabled = false;
- panel->prepared_time = 0;
panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
.vsync_start = 480 + 49,
.vsync_end = 480 + 49 + 2,
.vtotal = 480 + 49 + 2 + 22,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc powertip_ph800480t013_idf02 = {
#include <linux/err.h>
#include <linux/hdmi.h>
#include <linux/mfd/syscon.h>
+ #include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
- #include <linux/of_device.h>
+ #include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
adap->dev.parent = hdmi->dev;
adap->dev.of_node = hdmi->dev->of_node;
adap->algo = &inno_hdmi_algorithm;
- strlcpy(adap->name, "Inno HDMI", sizeof(adap->name));
+ strscpy(adap->name, "Inno HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
ret = i2c_add_adapter(adap);
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/module.h>
- #include <linux/of_device.h>
+ #include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
}
static const struct clk_ops tegra_clk_sor_pad_ops = {
+ .determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = tegra_clk_sor_pad_set_parent,
.get_parent = tegra_clk_sor_pad_get_parent,
};