Pull drm fixes from Simona Vetter:
 "Very quiet, all just small stuff and nothing scary pending to my
  knowledge:

   - drm_panic: bunch of size calculation fixes

   - panthor: fix kernel panic on partial gpu va unmap

   - rockchip: hdmi hotplug setup fix

   - amdgpu: dp mst, dc/display fixes

   - i915: fix panic structure leak

   - xe: madvise uapi fix, wq alloc error, vma flag handling fix"

* tag 'drm-fixes-2025-10-24' of https://gitlab.freedesktop.org/drm/kernel:
  drm/xe: Check return value of GGTT workqueue allocation
  drm/amd/display: use GFP_NOWAIT for allocation in interrupt handler
  drm/amd/display: increase max link count and fix link->enc NULL pointer access
  drm/amd/display: Fix NULL pointer dereference
  drm/panic: Fix 24bit pixel crossing page boundaries
  drm/panic: Fix divide by 0 if the screen width < font width
  drm/panic: Fix kmsg text drawing rectangle
  drm/panic: Fix qr_code, ensure vmargin is positive
  drm/panic: Fix overlap between qr code and logo
  drm/panic: Fix drawing the logo on a small narrow screen
  drm/xe/uapi: Hide the madvise autoreset behind a VM_BIND flag
  drm/xe: Retain vma flags when recreating and splitting vmas for madvise
  drm/i915/panic: fix panic structure allocation memory leak
  drm/panthor: Fix kernel panic on partial unmap of a GPU VA region
  drm/rockchip: dw_hdmi: use correct SCLIN mask for RK3228
Linus Torvalds
2025-10-24 16:49:16 -07:00
14 changed files with 153 additions and 95 deletions

View File

@@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
struct dc_stream_state *stream,
struct dc_crtc_timing_adjust *adjust)
{
- struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
if (!offload_work) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
return;
}
- struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
if (!adjust_copy) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
kfree(offload_work);

View File

@@ -200,6 +200,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */

View File

@@ -44,7 +44,13 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
- #define MAX_LINKS (MAX_PIPES * 2 +2)
+ #define MAX_DPIA 6
+ #define MAX_CONNECTOR 6
+ #define MAX_VIRTUAL_LINKS 4
+ #define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4

View File

@@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
struct audio_output audio_output[MAX_PIPES];
struct dc_stream_state *streams_on_link[MAX_PIPES];
int num_streams_on_link = 0;
+ struct dc *dc = (struct dc *)link->dc;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
+ dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
}
}
}

View File

@@ -174,6 +174,33 @@ static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
*p = color & 0xff;
}
+ /*
+ * Special case if the pixel crosses page boundaries
+ */
+ static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page,
+ unsigned int offset, u32 color)
+ {
+ u8 *vaddr2;
+ u8 *p = vaddr + offset;
+ vaddr2 = kmap_local_page_try_from_panic(next_page);
+ *p++ = color & 0xff;
+ color >>= 8;
+ if (offset == PAGE_SIZE - 1)
+ p = vaddr2;
+ *p++ = color & 0xff;
+ color >>= 8;
+ if (offset == PAGE_SIZE - 2)
+ p = vaddr2;
+ *p = color & 0xff;
+ kunmap_local(vaddr2);
+ }
static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
{
u32 *p = vaddr + offset;
@@ -231,7 +258,14 @@ static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- if (vaddr)
+ if (!vaddr)
+ continue;
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, fg32);
+ else
+ drm_panic_write_pixel(vaddr, offset, fg32, cpp);
}
}
@@ -321,7 +355,15 @@ static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- drm_panic_write_pixel(vaddr, offset, color, cpp);
+ if (!vaddr)
+ continue;
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, color);
+ else
+ drm_panic_write_pixel(vaddr, offset, color, cpp);
}
}
if (vaddr)
@@ -429,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f
static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
const struct font_desc *font, u32 fg_color)
{
+ if (rect->x2 > sb->width || rect->y2 > sb->height)
+ return;
if (logo_mono)
drm_panic_blit(sb, rect, logo_mono->data,
DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);
@@ -477,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
struct drm_panic_line *line, int yoffset, u32 fg_color)
{
int chars_per_row = sb->width / font->width;
- struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height);
+ struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height);
struct drm_panic_line line_wrap;
if (line->len > chars_per_row) {
@@ -520,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
struct drm_panic_line line;
int yoffset;
- if (!font)
+ if (!font || font->width > sb->width)
return;
yoffset = sb->height - font->height - (sb->height % font->height) / 2;
@@ -733,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
pr_debug("QR width %d and scale %d\n", qr_width, scale);
r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
- v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
+ v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg);
+ if (v_margin < 0)
+ return -ENOSPC;
+ v_margin /= 5;
drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,
@@ -746,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
/* Fill with the background color, and draw text on top */
drm_panic_fill(sb, &r_screen, bg_color);
- if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
+ if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas))
drm_panic_logo_draw(sb, &r_logo, font, fg_color);
draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);

View File

@@ -2117,6 +2117,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
+ kfree(intel_fb->panic);
kfree(intel_fb);
}
@@ -2215,16 +2216,22 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct intel_display *display = to_intel_display(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- int ret = -EINVAL;
+ int ret;
int i;
+ intel_fb->panic = intel_panic_alloc();
+ if (!intel_fb->panic)
+ return -ENOMEM;
/*
* intel_frontbuffer_get() must be done before
* intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
*/
intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
+ if (!intel_fb->frontbuffer) {
+ ret = -ENOMEM;
+ goto err_free_panic;
+ }
ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
if (ret)
@@ -2323,6 +2330,9 @@ err_bo_framebuffer_fini:
intel_fb_bo_framebuffer_fini(obj);
err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
+ err_free_panic:
+ kfree(intel_fb->panic);
return ret;
}
@@ -2349,20 +2359,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct intel_framebuffer *intel_framebuffer_alloc(void)
{
struct intel_framebuffer *intel_fb;
- struct intel_panic *panic;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
return NULL;
- panic = intel_panic_alloc();
- if (!panic) {
- kfree(intel_fb);
- return NULL;
- }
- intel_fb->panic = panic;
return intel_fb;
}

View File

@@ -1175,10 +1175,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
break;
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
- /* Partial unmaps might trigger a remap with either a prev or a next VA,
- * but not both.
+ /* Two VMAs can be needed for an unmap, as an unmap can happen
+ * in the middle of a drm_gpuva, requiring a remap with both
+ * prev & next VA. Or an unmap can span more than one drm_gpuva
+ * where the first and last ones are covered partially, requiring
+ * a remap for the first with a prev VA and remap for the last
+ * with a next VA.
*/
- vma_count = 1;
+ vma_count = 2;
break;
default:
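
As a side note, the new upper bound of two preallocated VMAs matches the worst case of splitting a mapped region around an unmap request: at most a head ("prev") and a tail ("next") piece survive. The stand-alone sketch below illustrates that range arithmetic with plain integers; it is an assumption-laden illustration, not panthor's GPUVA-manager code.

#include <stdint.h>
#include <stdio.h>

struct piece { uint64_t start, end; };

/* Splitting an existing VA region [va_start, va_end) around an unmap request
 * [start, end) yields up to two surviving pieces. */
static int split_around_unmap(uint64_t va_start, uint64_t va_end,
                              uint64_t start, uint64_t end,
                              struct piece out[2])
{
        int n = 0;

        if (start > va_start)           /* "prev" remap keeps the head */
                out[n++] = (struct piece){ va_start, start };
        if (end < va_end)               /* "next" remap keeps the tail */
                out[n++] = (struct piece){ end, va_end };
        return n;
}

int main(void)
{
        struct piece p[2];
        /* Unmapping the middle of a 0x0-0x10000 region keeps both ends,
         * hence two remap VMAs are needed. */
        int n = split_around_unmap(0x0, 0x10000, 0x4000, 0x8000, p);

        for (int i = 0; i < n; i++)
                printf("remap piece %d: [0x%jx, 0x%jx)\n", i,
                       (uintmax_t)p[i].start, (uintmax_t)p[i].end);
        return 0;
}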

View File

@@ -361,7 +361,7 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,
FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) |
- FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1));
+ FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));
}
static enum drm_connector_status

View File

@@ -292,6 +292,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+ if (!ggtt->wq)
+ return -ENOMEM;
__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);

View File

@@ -2022,7 +2022,7 @@ static int op_prepare(struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
!op->map.invalidate_on_bind) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
@@ -2252,7 +2252,7 @@ static void op_commit(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,

View File

@@ -302,6 +302,11 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
if (!vma)
return -EINVAL;
+ if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
+ drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
+ return 0;
+ }
if (xe_vma_has_default_mem_attrs(vma))
return 0;

View File

@@ -616,6 +616,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
vops->pt_update_ops[i].num_ops += inc_val;
}
+ #define XE_VMA_CREATE_MASK ( \
+ XE_VMA_READ_ONLY | \
+ XE_VMA_DUMPABLE | \
+ XE_VMA_SYSTEM_ALLOCATOR | \
+ DRM_GPUVA_SPARSE | \
+ XE_VMA_MADV_AUTORESET)
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -628,8 +635,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
op->base.map.gem.offset = vma->gpuva.gem.offset;
op->map.vma = vma;
op->map.immediate = true;
- op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
- op->map.is_null = xe_vma_is_null(vma);
+ op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
}
static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
@@ -932,11 +938,6 @@ static void xe_vma_free(struct xe_vma *vma)
kfree(vma);
}
- #define VMA_CREATE_FLAG_READ_ONLY BIT(0)
- #define VMA_CREATE_FLAG_IS_NULL BIT(1)
- #define VMA_CREATE_FLAG_DUMPABLE BIT(2)
- #define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
@@ -947,11 +948,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_vma *vma;
struct xe_tile *tile;
u8 id;
- bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
- bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
- bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
- bool is_cpu_addr_mirror =
- (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
+ bool is_null = (flags & DRM_GPUVA_SPARSE);
+ bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -972,10 +970,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (!vma)
return ERR_PTR(-ENOMEM);
- if (is_cpu_addr_mirror)
- vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
- if (is_null)
- vma->gpuva.flags |= DRM_GPUVA_SPARSE;
if (bo)
vma->gpuva.gem.obj = &bo->ttm.base;
}
@@ -986,10 +980,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.vm = &vm->gpuvm;
vma->gpuva.va.addr = start;
vma->gpuva.va.range = end - start + 1;
- if (read_only)
- vma->gpuva.flags |= XE_VMA_READ_ONLY;
- if (dumpable)
- vma->gpuva.flags |= XE_VMA_DUMPABLE;
+ vma->gpuva.flags = flags;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -2272,12 +2263,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.immediate =
flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
- op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
- op->map.is_cpu_addr_mirror = flags &
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
- op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
+ op->map.vma_flags |= XE_VMA_READ_ONLY;
+ if (flags & DRM_XE_VM_BIND_FLAG_NULL)
+ op->map.vma_flags |= DRM_GPUVA_SPARSE;
+ if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
+ if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
+ op->map.vma_flags |= XE_VMA_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
+ op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
op->map.pat_index = pat_index;
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
@@ -2590,14 +2585,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
.pat_index = op->map.pat_index,
};
- flags |= op->map.read_only ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->map.is_null ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->map.dumpable ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= op->map.is_cpu_addr_mirror ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+ flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
vma = new_vma(vm, &op->base.map, &default_attr,
flags);
@@ -2606,7 +2594,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->map.vma = vma;
if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
- !op->map.is_cpu_addr_mirror) ||
+ !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask, 1);
@@ -2637,18 +2625,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= xe_vma_is_cpu_addr_mirror(old) ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+ flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
&old->attr, flags);
@@ -3279,7 +3256,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE | \
DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
+ DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -3394,7 +3372,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
!(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
XE_IOCTL_DBG(xe, obj &&
- op == DRM_XE_VM_BIND_OP_UNMAP)) {
+ op == DRM_XE_VM_BIND_OP_UNMAP) ||
+ XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
+ (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -4212,7 +4192,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
struct xe_vma_ops vops;
struct drm_gpuva_ops *ops = NULL;
struct drm_gpuva_op *__op;
- bool is_cpu_addr_mirror = false;
+ unsigned int vma_flags = 0;
bool remap_op = false;
struct xe_vma_mem_attr tmp_attr;
u16 default_pat;
@@ -4242,15 +4222,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
vma = gpuva_to_vma(op->base.unmap.va);
XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_REMAP) {
vma = gpuva_to_vma(op->base.remap.unmap->va);
default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
- op->map.is_cpu_addr_mirror = true;
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
op->map.pat_index = default_pat;
}
} else {
@@ -4259,11 +4241,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
xe_assert(vm->xe, !remap_op);
xe_assert(vm->xe, xe_vma_has_no_bo(vma));
remap_op = true;
- if (xe_vma_is_cpu_addr_mirror(vma))
- is_cpu_addr_mirror = true;
- else
- is_cpu_addr_mirror = false;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4272,10 +4250,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
/*
* In case of madvise ops DRM_GPUVA_OP_MAP is
* always after DRM_GPUVA_OP_REMAP, so ensure
- * we assign op->map.is_cpu_addr_mirror true
- * if REMAP is for xe_vma_is_cpu_addr_mirror vma
+ * to propagate the flags from the vma we're
+ * unmapping.
*/
- op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
}
}
print_op(vm->xe, __op);

View File

@@ -46,6 +46,7 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
+ #define XE_VMA_MADV_AUTORESET (DRM_GPUVA_USERBITS << 10)
/**
* struct xe_vma_mem_attr - memory attributes associated with vma
@@ -345,17 +346,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
+ unsigned int vma_flags;
/** @immediate: Immediate bind */
bool immediate;
- /** @read_only: Read only */
- bool read_only;
- /** @is_null: is NULL binding */
- bool is_null;
- /** @is_cpu_addr_mirror: is CPU address mirror binding */
- bool is_cpu_addr_mirror;
- /** @dumpable: whether BO is dumped on GPU hang */
- bool dumpable;
/** @invalidate: invalidate the VMA before bind */
bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;

View File

@@ -1013,6 +1013,20 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
+ * - %DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET - Can be used in combination with
+ * %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR to reset madvises when the underlying
+ * CPU address space range is unmapped (typically with munmap(2) or brk(2)).
+ * The madvise values set with &DRM_IOCTL_XE_MADVISE are reset to the values
+ * that were present immediately after the &DRM_IOCTL_XE_VM_BIND.
+ * The reset GPU virtual address range is the intersection of the range bound
+ * using &DRM_IOCTL_XE_VM_BIND and the virtual CPU address space range
+ * unmapped.
+ * This functionality is present to mimic the behaviour of CPU address space
+ * madvises set using madvise(2), which are typically reset on unmap.
+ * Note: free(3) may or may not call munmap(2) and/or brk(2), and may thus
+ * not invoke autoreset. Neither will stack variables going out of scope.
+ * Therefore it's recommended to always explicitly reset the madvises when
+ * freeing the memory backing a region used in a &DRM_IOCTL_XE_MADVISE call.
*
* The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
* - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
@@ -1119,6 +1133,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
+ #define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET (1 << 6)
/** @flags: Bind flags */
__u32 flags;
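
For context, a rough user-space sketch of how the new flag could be combined with a CPU-address-mirror bind follows. The struct layout, field names and ioctl wrapper (drm_xe_vm_bind, drm_xe_vm_bind_op, drmIoctl) reflect my reading of the xe uAPI headers and libdrm, so treat them as assumptions to be checked against include/uapi/drm/xe_drm.h rather than as a reference implementation.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

/* Bind a CPU-address-mirror range on a fault-mode VM and opt in to madvise
 * autoreset on munmap(). Field names are assumptions per the note above. */
static int bind_mirror_with_autoreset(int fd, uint32_t vm_id,
                                      uint64_t addr, uint64_t range)
{
        struct drm_xe_vm_bind bind = {
                .vm_id = vm_id,
                .num_binds = 1,
                .bind = {
                        .op = DRM_XE_VM_BIND_OP_MAP,
                        .flags = DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR |
                                 DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET,
                        .addr = addr,
                        .range = range,
                        .obj = 0,        /* BO handle MBZ for mirror binds */
                        .obj_offset = 0, /* BO offset MBZ for mirror binds */
                },
        };

        /* Madvises applied later via DRM_IOCTL_XE_MADVISE to this range fall
         * back to their post-bind values once the CPU range is unmapped. */
        return drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}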