Linux 6.1.31 headers
drm/amdgpu_drm.h (new file, 1186 lines): diff suppressed because it is too large

drm/armada_drm.h (new file, 56 lines)
@@ -0,0 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2012 Russell King
 * With inspiration from the i915 driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef DRM_ARMADA_IOCTL_H
#define DRM_ARMADA_IOCTL_H

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_ARMADA_GEM_CREATE 0x00
#define DRM_ARMADA_GEM_MMAP 0x02
#define DRM_ARMADA_GEM_PWRITE 0x03

#define ARMADA_IOCTL(dir, name, str) \
	DRM_##dir(DRM_COMMAND_BASE + DRM_ARMADA_##name, struct drm_armada_##str)

struct drm_armada_gem_create {
	__u32 handle;
	__u32 size;
};
#define DRM_IOCTL_ARMADA_GEM_CREATE \
	ARMADA_IOCTL(IOWR, GEM_CREATE, gem_create)

struct drm_armada_gem_mmap {
	__u32 handle;
	__u32 pad;
	__u64 offset;
	__u64 size;
	__u64 addr;
};
#define DRM_IOCTL_ARMADA_GEM_MMAP \
	ARMADA_IOCTL(IOWR, GEM_MMAP, gem_mmap)

struct drm_armada_gem_pwrite {
	__u64 ptr;
	__u32 handle;
	__u32 offset;
	__u32 size;
};
#define DRM_IOCTL_ARMADA_GEM_PWRITE \
	ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)

#if defined(__cplusplus)
}
#endif

#endif
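For orientation, these ioctls are driven from user space in the usual DRM way. The fragment below is an illustrative sketch only, not part of the commit; it assumes the header is reachable as "armada_drm.h" on the include path, that fd is an open armada DRM node, and it uses libdrm's drmIoctl() wrapper.

#include <stdint.h>
#include <xf86drm.h>        /* assumed libdrm install; provides drmIoctl() */
#include "armada_drm.h"     /* the header added above */

/* Allocate a GEM buffer and ask the kernel to map it for CPU access. */
static int armada_alloc_and_map(int fd, uint32_t size, uint64_t *addr)
{
	struct drm_armada_gem_create create = { .size = size };
	struct drm_armada_gem_mmap map = { 0 };

	if (drmIoctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &create))
		return -1;

	map.handle = create.handle;
	map.offset = 0;
	map.size = size;
	if (drmIoctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &map))
		return -1;

	*addr = map.addr;	/* CPU address returned by the kernel */
	return 0;
}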
drm/drm_fourcc.h (new file, 1485 lines): diff suppressed because it is too large
drm/drm_mode.h (new file, 1249 lines): diff suppressed because it is too large

drm/drm_sarea.h (new file, 92 lines)
@@ -0,0 +1,92 @@
/**
 * \file drm_sarea.h
 * \brief SAREA definitions
 *
 * \author Michel Dänzer <michel@daenzer.net>
 */

/*
 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_SAREA_H_
#define _DRM_SAREA_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* SAREA area needs to be at least a page */
#if defined(__alpha__)
#define SAREA_MAX 0x2000U
#elif defined(__mips__)
#define SAREA_MAX 0x4000U
#elif defined(__ia64__)
#define SAREA_MAX 0x10000U /* 64kB */
#else
/* Intel 830M driver needs at least 8k SAREA */
#define SAREA_MAX 0x2000U
#endif

/** Maximum number of drawables in the SAREA */
#define SAREA_MAX_DRAWABLES 256

#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000

/** SAREA drawable */
struct drm_sarea_drawable {
	unsigned int stamp;
	unsigned int flags;
};

/** SAREA frame */
struct drm_sarea_frame {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int fullscreen;
};

/** SAREA */
struct drm_sarea {
	/** first thing is always the DRM locking structure */
	struct drm_hw_lock lock;
	/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
	struct drm_hw_lock drawable_lock;
	struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
	struct drm_sarea_frame frame; /**< frame */
	drm_context_t dummy_context;
};

typedef struct drm_sarea_drawable drm_sarea_drawable_t;
typedef struct drm_sarea_frame drm_sarea_frame_t;
typedef struct drm_sarea drm_sarea_t;

#if defined(__cplusplus)
}
#endif

#endif /* _DRM_SAREA_H_ */
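The constants above imply an invariant that is easy to check at build time: the legacy shared area, drawable table included, has to fit in the minimum SAREA allocation. An illustrative check only, not part of the commit; the include path is assumed.

#include <drm/drm_sarea.h>	/* assumed install path for the sanitized header */

/* One SAREA_MAX block must hold the whole legacy shared area. */
_Static_assert(sizeof(struct drm_sarea) <= SAREA_MAX,
	       "struct drm_sarea does not fit in SAREA_MAX");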

drm/etnaviv_drm.h (new file, 300 lines)
@@ -0,0 +1,300 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ETNAVIV_DRM_H__
#define __ETNAVIV_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints:
 *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
 *     user/kernel compatibility
 *  2) Keep fields aligned to their size
 *  3) Because of how drm_ioctl() works, we can add new fields at
 *     the end of an ioctl if some care is taken: drm_ioctl() will
 *     zero out the new fields at the tail of the ioctl, so a zero
 *     value should have a backwards compatible meaning. And for
 *     output params, userspace won't see the newly added output
 *     fields.. so that has to be somehow ok.
 */

/* timeouts are specified in clock-monotonic absolute times (to simplify
 * restarting interrupted ioctls). The following struct is logically the
 * same as 'struct timespec' but 32/64b ABI safe.
 */
struct drm_etnaviv_timespec {
	__s64 tv_sec; /* seconds */
	__s64 tv_nsec; /* nanoseconds */
};

#define ETNAVIV_PARAM_GPU_MODEL 0x01
#define ETNAVIV_PARAM_GPU_REVISION 0x02
#define ETNAVIV_PARAM_GPU_FEATURES_0 0x03
#define ETNAVIV_PARAM_GPU_FEATURES_1 0x04
#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05
#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06
#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
#define ETNAVIV_PARAM_GPU_FEATURES_7 0x0a
#define ETNAVIV_PARAM_GPU_FEATURES_8 0x0b
#define ETNAVIV_PARAM_GPU_FEATURES_9 0x0c
#define ETNAVIV_PARAM_GPU_FEATURES_10 0x0d
#define ETNAVIV_PARAM_GPU_FEATURES_11 0x0e
#define ETNAVIV_PARAM_GPU_FEATURES_12 0x0f

#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
#define ETNAVIV_PARAM_GPU_THREAD_COUNT 0x12
#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE 0x13
#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT 0x14
#define ETNAVIV_PARAM_GPU_PIXEL_PIPES 0x15
#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e

#define ETNA_MAX_PIPES 4

struct drm_etnaviv_param {
	__u32 pipe; /* in */
	__u32 param; /* in, ETNAVIV_PARAM_x */
	__u64 value; /* out (get_param) or in (set_param) */
};

/*
 * GEM buffers:
 */

#define ETNA_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define ETNA_BO_CACHED 0x00010000
#define ETNA_BO_WC 0x00020000
#define ETNA_BO_UNCACHED 0x00040000
/* map flags */
#define ETNA_BO_FORCE_MMU 0x00100000

struct drm_etnaviv_gem_new {
	__u64 size; /* in */
	__u32 flags; /* in, mask of ETNA_BO_x */
	__u32 handle; /* out */
};

struct drm_etnaviv_gem_info {
	__u32 handle; /* in */
	__u32 pad;
	__u64 offset; /* out, offset to pass to mmap() */
};

#define ETNA_PREP_READ 0x01
#define ETNA_PREP_WRITE 0x02
#define ETNA_PREP_NOSYNC 0x04

struct drm_etnaviv_gem_cpu_prep {
	__u32 handle; /* in */
	__u32 op; /* in, mask of ETNA_PREP_x */
	struct drm_etnaviv_timespec timeout; /* in */
};

struct drm_etnaviv_gem_cpu_fini {
	__u32 handle; /* in */
	__u32 flags; /* in, placeholder for now, no defined values */
};

/*
 * Cmdstream Submission:
 */

/* The value written into the cmdstream is logically:
 * relocbuf->gpuaddr + reloc_offset
 *
 * NOTE that reloc's must be sorted by order of increasing submit_offset,
 * otherwise EINVAL.
 */
struct drm_etnaviv_gem_submit_reloc {
	__u32 submit_offset; /* in, offset from submit_bo */
	__u32 reloc_idx; /* in, index of reloc_bo buffer */
	__u64 reloc_offset; /* in, offset from start of reloc_bo */
	__u32 flags; /* in, placeholder for now, no defined values */
};

/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
 * cmdstream buffer(s) themselves or reloc entries) has one (and only
 * one) entry in the submit->bos[] table.
 *
 * As a optimization, the current buffer (gpu virtual address) can be
 * passed back through the 'presumed' field. If on a subsequent reloc,
 * userspace passes back a 'presumed' address that is still valid,
 * then patching the cmdstream for this entry is skipped. This can
 * avoid kernel needing to map/access the cmdstream bo in the common
 * case.
 * If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed'
 * field is interpreted as the fixed location to map the bo into the gpu
 * virtual address space. If the kernel is unable to map the buffer at
 * this location the submit will fail. This means userspace is responsible
 * for the whole gpu virtual address management.
 */
#define ETNA_SUBMIT_BO_READ 0x0001
#define ETNA_SUBMIT_BO_WRITE 0x0002
struct drm_etnaviv_gem_submit_bo {
	__u32 flags; /* in, mask of ETNA_SUBMIT_BO_x */
	__u32 handle; /* in, GEM handle */
	__u64 presumed; /* in/out, presumed buffer address */
};

/* performance monitor request (pmr) */
#define ETNA_PM_PROCESS_PRE 0x0001
#define ETNA_PM_PROCESS_POST 0x0002
struct drm_etnaviv_gem_submit_pmr {
	__u32 flags; /* in, when to process request (ETNA_PM_PROCESS_x) */
	__u8 domain; /* in, pm domain */
	__u8 pad;
	__u16 signal; /* in, pm signal */
	__u32 sequence; /* in, sequence number */
	__u32 read_offset; /* in, offset from read_bo */
	__u32 read_idx; /* in, index of read_bo buffer */
};

/* Each cmdstream submit consists of a table of buffers involved, and
 * one or more cmdstream buffers. This allows for conditional execution
 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
 */
#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
#define ETNA_SUBMIT_SOFTPIN 0x0008
#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
	ETNA_SUBMIT_FENCE_FD_IN | \
	ETNA_SUBMIT_FENCE_FD_OUT| \
	ETNA_SUBMIT_SOFTPIN)
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02
struct drm_etnaviv_gem_submit {
	__u32 fence; /* out */
	__u32 pipe; /* in */
	__u32 exec_state; /* in, initial execution state (ETNA_PIPE_x) */
	__u32 nr_bos; /* in, number of submit_bo's */
	__u32 nr_relocs; /* in, number of submit_reloc's */
	__u32 stream_size; /* in, cmdstream size */
	__u64 bos; /* in, ptr to array of submit_bo's */
	__u64 relocs; /* in, ptr to array of submit_reloc's */
	__u64 stream; /* in, ptr to cmdstream */
	__u32 flags; /* in, mask of ETNA_SUBMIT_x */
	__s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
	__u64 pmrs; /* in, ptr to array of submit_pmr's */
	__u32 nr_pmrs; /* in, number of submit_pmr's */
	__u32 pad;
};

/* The normal way to synchronize with the GPU is just to CPU_PREP on
 * a buffer if you need to access it from the CPU (other cmdstream
 * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
 * handle the required synchronization under the hood). This ioctl
 * mainly just exists as a way to implement the gallium pipe_fence
 * APIs without requiring a dummy bo to synchronize on.
 */
#define ETNA_WAIT_NONBLOCK 0x01
struct drm_etnaviv_wait_fence {
	__u32 pipe; /* in */
	__u32 fence; /* in */
	__u32 flags; /* in, mask of ETNA_WAIT_x */
	__u32 pad;
	struct drm_etnaviv_timespec timeout; /* in */
};

#define ETNA_USERPTR_READ 0x01
#define ETNA_USERPTR_WRITE 0x02
struct drm_etnaviv_gem_userptr {
	__u64 user_ptr; /* in, page aligned user pointer */
	__u64 user_size; /* in, page aligned user size */
	__u32 flags; /* in, flags */
	__u32 handle; /* out, non-zero handle */
};

struct drm_etnaviv_gem_wait {
	__u32 pipe; /* in */
	__u32 handle; /* in, bo to be waited for */
	__u32 flags; /* in, mask of ETNA_WAIT_x */
	__u32 pad;
	struct drm_etnaviv_timespec timeout; /* in */
};

/*
 * Performance Monitor (PM):
 */

struct drm_etnaviv_pm_domain {
	__u32 pipe; /* in */
	__u8 iter; /* in/out, select pm domain at index iter */
	__u8 id; /* out, id of domain */
	__u16 nr_signals; /* out, how many signals does this domain provide */
	char name[64]; /* out, name of domain */
};

struct drm_etnaviv_pm_signal {
	__u32 pipe; /* in */
	__u8 domain; /* in, pm domain index */
	__u8 pad;
	__u16 iter; /* in/out, select pm source at index iter */
	__u16 id; /* out, id of signal */
	char name[64]; /* out, name of domain */
};

#define DRM_ETNAVIV_GET_PARAM 0x00
/* placeholder:
#define DRM_ETNAVIV_SET_PARAM 0x01
 */
#define DRM_ETNAVIV_GEM_NEW 0x02
#define DRM_ETNAVIV_GEM_INFO 0x03
#define DRM_ETNAVIV_GEM_CPU_PREP 0x04
#define DRM_ETNAVIV_GEM_CPU_FINI 0x05
#define DRM_ETNAVIV_GEM_SUBMIT 0x06
#define DRM_ETNAVIV_WAIT_FENCE 0x07
#define DRM_ETNAVIV_GEM_USERPTR 0x08
#define DRM_ETNAVIV_GEM_WAIT 0x09
#define DRM_ETNAVIV_PM_QUERY_DOM 0x0a
#define DRM_ETNAVIV_PM_QUERY_SIG 0x0b
#define DRM_ETNAVIV_NUM_IOCTLS 0x0c

#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
#define DRM_IOCTL_ETNAVIV_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
#define DRM_IOCTL_ETNAVIV_PM_QUERY_DOM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_DOM, struct drm_etnaviv_pm_domain)
#define DRM_IOCTL_ETNAVIV_PM_QUERY_SIG DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_SIG, struct drm_etnaviv_pm_signal)

#if defined(__cplusplus)
}
#endif

#endif /* __ETNAVIV_DRM_H__ */
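As the struct comments above describe, a buffer is created with GEM_NEW and then mapped through the fake offset returned by GEM_INFO. A minimal illustrative sketch of that flow, not part of the commit; it assumes the header is reachable as "etnaviv_drm.h", fd is an open etnaviv DRM node, and plain ioctl() is used instead of libdrm wrappers.

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "etnaviv_drm.h"	/* the header added above */

/* Create a write-combined BO and map it into the CPU address space. */
static void *etnaviv_bo_map(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_etnaviv_gem_new req = {
		.size = size,
		.flags = ETNA_BO_WC,
	};
	struct drm_etnaviv_gem_info info = { 0 };

	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &req))
		return MAP_FAILED;

	info.handle = req.handle;
	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_INFO, &info))
		return MAP_FAILED;

	*handle = req.handle;
	/* info.offset is the fake mmap offset documented above */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, info.offset);
}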

drm/exynos_drm.h (new file, 424 lines)
@@ -0,0 +1,424 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
|
||||
/* exynos_drm.h
|
||||
*
|
||||
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
|
||||
* Authors:
|
||||
* Inki Dae <inki.dae@samsung.com>
|
||||
* Joonyoung Shim <jy0922.shim@samsung.com>
|
||||
* Seung-Woo Kim <sw0312.kim@samsung.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
|
||||
#ifndef _EXYNOS_DRM_H_
|
||||
#define _EXYNOS_DRM_H_
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* User-desired buffer creation information structure.
|
||||
*
|
||||
* @size: user-desired memory allocation size.
|
||||
* - this size value would be page-aligned internally.
|
||||
* @flags: user request for setting memory type or cache attributes.
|
||||
* @handle: returned a handle to created gem object.
|
||||
* - this handle will be set by gem module of kernel side.
|
||||
*/
|
||||
struct drm_exynos_gem_create {
|
||||
__u64 size;
|
||||
__u32 flags;
|
||||
__u32 handle;
|
||||
};
|
||||
|
||||
/**
|
||||
* A structure for getting a fake-offset that can be used with mmap.
|
||||
*
|
||||
* @handle: handle of gem object.
|
||||
* @reserved: just padding to be 64-bit aligned.
|
||||
* @offset: a fake-offset of gem object.
|
||||
*/
|
||||
struct drm_exynos_gem_map {
|
||||
__u32 handle;
|
||||
__u32 reserved;
|
||||
__u64 offset;
|
||||
};
|
||||
|
||||
/**
|
||||
* A structure to gem information.
|
||||
*
|
||||
* @handle: a handle to gem object created.
|
||||
* @flags: flag value including memory type and cache attribute and
|
||||
* this value would be set by driver.
|
||||
* @size: size to memory region allocated by gem and this size would
|
||||
* be set by driver.
|
||||
*/
|
||||
struct drm_exynos_gem_info {
|
||||
__u32 handle;
|
||||
__u32 flags;
|
||||
__u64 size;
|
||||
};
|
||||
|
||||
/**
|
||||
* A structure for user connection request of virtual display.
|
||||
*
|
||||
* @connection: indicate whether doing connection or not by user.
|
||||
* @extensions: if this value is 1 then the vidi driver would need additional
|
||||
* 128bytes edid data.
|
||||
* @edid: the edid data pointer from user side.
|
||||
*/
|
||||
struct drm_exynos_vidi_connection {
|
||||
__u32 connection;
|
||||
__u32 extensions;
|
||||
__u64 edid;
|
||||
};
|
||||
|
||||
/* memory type definitions. */
|
||||
enum e_drm_exynos_gem_mem_type {
|
||||
/* Physically Continuous memory and used as default. */
|
||||
EXYNOS_BO_CONTIG = 0 << 0,
|
||||
/* Physically Non-Continuous memory. */
|
||||
EXYNOS_BO_NONCONTIG = 1 << 0,
|
||||
/* non-cachable mapping and used as default. */
|
||||
EXYNOS_BO_NONCACHABLE = 0 << 1,
|
||||
/* cachable mapping. */
|
||||
EXYNOS_BO_CACHABLE = 1 << 1,
|
||||
/* write-combine mapping. */
|
||||
EXYNOS_BO_WC = 1 << 2,
|
||||
EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE |
|
||||
EXYNOS_BO_WC
|
||||
};
|
||||
|
||||
struct drm_exynos_g2d_get_ver {
|
||||
__u32 major;
|
||||
__u32 minor;
|
||||
};
|
||||
|
||||
struct drm_exynos_g2d_cmd {
|
||||
__u32 offset;
|
||||
__u32 data;
|
||||
};
|
||||
|
||||
enum drm_exynos_g2d_buf_type {
|
||||
G2D_BUF_USERPTR = 1 << 31,
|
||||
};
|
||||
|
||||
enum drm_exynos_g2d_event_type {
|
||||
G2D_EVENT_NOT,
|
||||
G2D_EVENT_NONSTOP,
|
||||
G2D_EVENT_STOP, /* not yet */
|
||||
};
|
||||
|
||||
struct drm_exynos_g2d_userptr {
|
||||
unsigned long userptr;
|
||||
unsigned long size;
|
||||
};
|
||||
|
||||
struct drm_exynos_g2d_set_cmdlist {
|
||||
__u64 cmd;
|
||||
__u64 cmd_buf;
|
||||
__u32 cmd_nr;
|
||||
__u32 cmd_buf_nr;
|
||||
|
||||
/* for g2d event */
|
||||
__u64 event_type;
|
||||
__u64 user_data;
|
||||
};
|
||||
|
||||
struct drm_exynos_g2d_exec {
|
||||
__u64 async;
|
||||
};
|
||||
|
||||
/* Exynos DRM IPP v2 API */
|
||||
|
||||
/**
|
||||
* Enumerate available IPP hardware modules.
|
||||
*
|
||||
* @count_ipps: size of ipp_id array / number of ipp modules (set by driver)
|
||||
* @reserved: padding
|
||||
* @ipp_id_ptr: pointer to ipp_id array or NULL
|
||||
*/
|
||||
struct drm_exynos_ioctl_ipp_get_res {
|
||||
__u32 count_ipps;
|
||||
__u32 reserved;
|
||||
__u64 ipp_id_ptr;
|
||||
};
|
||||
|
||||
enum drm_exynos_ipp_format_type {
|
||||
DRM_EXYNOS_IPP_FORMAT_SOURCE = 0x01,
|
||||
DRM_EXYNOS_IPP_FORMAT_DESTINATION = 0x02,
|
||||
};
|
||||
|
||||
struct drm_exynos_ipp_format {
|
||||
__u32 fourcc;
|
||||
__u32 type;
|
||||
__u64 modifier;
|
||||
};
|
||||
|
||||
enum drm_exynos_ipp_capability {
|
||||
DRM_EXYNOS_IPP_CAP_CROP = 0x01,
|
||||
DRM_EXYNOS_IPP_CAP_ROTATE = 0x02,
|
||||
DRM_EXYNOS_IPP_CAP_SCALE = 0x04,
|
||||
DRM_EXYNOS_IPP_CAP_CONVERT = 0x08,
|
||||
};
|
||||
|
||||
/**
|
||||
* Get IPP hardware capabilities and supported image formats.
|
||||
*
|
||||
* @ipp_id: id of IPP module to query
|
||||
* @capabilities: bitmask of drm_exynos_ipp_capability (set by driver)
|
||||
* @reserved: padding
|
||||
* @formats_count: size of formats array (in entries) / number of filled
|
||||
* formats (set by driver)
|
||||
* @formats_ptr: pointer to formats array or NULL
|
||||
*/
|
||||
struct drm_exynos_ioctl_ipp_get_caps {
|
||||
__u32 ipp_id;
|
||||
__u32 capabilities;
|
||||
__u32 reserved;
|
||||
__u32 formats_count;
|
||||
__u64 formats_ptr;
|
||||
};
|
||||
|
||||
enum drm_exynos_ipp_limit_type {
|
||||
/* size (horizontal/vertial) limits, in pixels (min, max, alignment) */
|
||||
DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE = 0x0001,
|
||||
/* scale ratio (horizonta/vertial), 16.16 fixed point (min, max) */
|
||||
DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE = 0x0002,
|
||||
|
||||
/* image buffer area */
|
||||
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER = 0x0001 << 16,
|
||||
/* src/dst rectangle area */
|
||||
DRM_EXYNOS_IPP_LIMIT_SIZE_AREA = 0x0002 << 16,
|
||||
/* src/dst rectangle area when rotation enabled */
|
||||
DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED = 0x0003 << 16,
|
||||
|
||||
DRM_EXYNOS_IPP_LIMIT_TYPE_MASK = 0x000f,
|
||||
DRM_EXYNOS_IPP_LIMIT_SIZE_MASK = 0x000f << 16,
|
||||
};
|
||||
|
||||
struct drm_exynos_ipp_limit_val {
|
||||
__u32 min;
|
||||
__u32 max;
|
||||
__u32 align;
|
||||
__u32 reserved;
|
||||
};
|
||||
|
||||
/**
|
||||
* IPP module limitation.
|
||||
*
|
||||
* @type: limit type (see drm_exynos_ipp_limit_type enum)
|
||||
* @reserved: padding
|
||||
* @h: horizontal limits
|
||||
* @v: vertical limits
|
||||
*/
|
||||
struct drm_exynos_ipp_limit {
|
||||
__u32 type;
|
||||
__u32 reserved;
|
||||
struct drm_exynos_ipp_limit_val h;
|
||||
struct drm_exynos_ipp_limit_val v;
|
||||
};
|
||||
|
||||
/**
|
||||
* Get IPP limits for given image format.
|
||||
*
|
||||
* @ipp_id: id of IPP module to query
|
||||
* @fourcc: image format code (see DRM_FORMAT_* in drm_fourcc.h)
|
||||
* @modifier: image format modifier (see DRM_FORMAT_MOD_* in drm_fourcc.h)
|
||||
* @type: source/destination identifier (drm_exynos_ipp_format_flag enum)
|
||||
* @limits_count: size of limits array (in entries) / number of filled entries
|
||||
* (set by driver)
|
||||
* @limits_ptr: pointer to limits array or NULL
|
||||
*/
|
||||
struct drm_exynos_ioctl_ipp_get_limits {
|
||||
__u32 ipp_id;
|
||||
__u32 fourcc;
|
||||
__u64 modifier;
|
||||
__u32 type;
|
||||
__u32 limits_count;
|
||||
__u64 limits_ptr;
|
||||
};
|
||||
|
||||
enum drm_exynos_ipp_task_id {
|
||||
/* buffer described by struct drm_exynos_ipp_task_buffer */
|
||||
DRM_EXYNOS_IPP_TASK_BUFFER = 0x0001,
|
||||
/* rectangle described by struct drm_exynos_ipp_task_rect */
|
||||
DRM_EXYNOS_IPP_TASK_RECTANGLE = 0x0002,
|
||||
/* transformation described by struct drm_exynos_ipp_task_transform */
|
||||
DRM_EXYNOS_IPP_TASK_TRANSFORM = 0x0003,
|
||||
/* alpha configuration described by struct drm_exynos_ipp_task_alpha */
|
||||
DRM_EXYNOS_IPP_TASK_ALPHA = 0x0004,
|
||||
|
||||
/* source image data (for buffer and rectangle chunks) */
|
||||
DRM_EXYNOS_IPP_TASK_TYPE_SOURCE = 0x0001 << 16,
|
||||
/* destination image data (for buffer and rectangle chunks) */
|
||||
DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION = 0x0002 << 16,
|
||||
};
|
||||
|
||||
/**
|
||||
* Memory buffer with image data.
|
||||
*
|
||||
* @id: must be DRM_EXYNOS_IPP_TASK_BUFFER
|
||||
* other parameters are same as for AddFB2 generic DRM ioctl
|
||||
*/
|
||||
struct drm_exynos_ipp_task_buffer {
|
||||
__u32 id;
|
||||
__u32 fourcc;
|
||||
__u32 width, height;
|
||||
__u32 gem_id[4];
|
||||
__u32 offset[4];
|
||||
__u32 pitch[4];
|
||||
__u64 modifier;
|
||||
};
|
||||
|
||||
/**
|
||||
* Rectangle for processing.
|
||||
*
|
||||
* @id: must be DRM_EXYNOS_IPP_TASK_RECTANGLE
|
||||
* @reserved: padding
|
||||
* @x,@y: left corner in pixels
|
||||
* @w,@h: width/height in pixels
|
||||
*/
|
||||
struct drm_exynos_ipp_task_rect {
|
||||
__u32 id;
|
||||
__u32 reserved;
|
||||
__u32 x;
|
||||
__u32 y;
|
||||
__u32 w;
|
||||
__u32 h;
|
||||
};
|
||||
|
||||
/**
|
||||
* Image tranformation description.
|
||||
*
|
||||
* @id: must be DRM_EXYNOS_IPP_TASK_TRANSFORM
|
||||
* @rotation: DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* values
|
||||
*/
|
||||
struct drm_exynos_ipp_task_transform {
|
||||
__u32 id;
|
||||
__u32 rotation;
|
||||
};
|
||||
|
||||
/**
|
||||
* Image global alpha configuration for formats without alpha values.
|
||||
*
|
||||
* @id: must be DRM_EXYNOS_IPP_TASK_ALPHA
|
||||
* @value: global alpha value (0-255)
|
||||
*/
|
||||
struct drm_exynos_ipp_task_alpha {
|
||||
__u32 id;
|
||||
__u32 value;
|
||||
};
|
||||
|
||||
enum drm_exynos_ipp_flag {
|
||||
/* generate DRM event after processing */
|
||||
DRM_EXYNOS_IPP_FLAG_EVENT = 0x01,
|
||||
/* dry run, only check task parameters */
|
||||
DRM_EXYNOS_IPP_FLAG_TEST_ONLY = 0x02,
|
||||
/* non-blocking processing */
|
||||
DRM_EXYNOS_IPP_FLAG_NONBLOCK = 0x04,
|
||||
};
|
||||
|
||||
#define DRM_EXYNOS_IPP_FLAGS (DRM_EXYNOS_IPP_FLAG_EVENT |\
|
||||
DRM_EXYNOS_IPP_FLAG_TEST_ONLY | DRM_EXYNOS_IPP_FLAG_NONBLOCK)
|
||||
|
||||
/**
|
||||
* Perform image processing described by array of drm_exynos_ipp_task_*
|
||||
* structures (parameters array).
|
||||
*
|
||||
* @ipp_id: id of IPP module to run the task
|
||||
* @flags: bitmask of drm_exynos_ipp_flag values
|
||||
* @reserved: padding
|
||||
* @params_size: size of parameters array (in bytes)
|
||||
* @params_ptr: pointer to parameters array or NULL
|
||||
* @user_data: (optional) data for drm event
|
||||
*/
|
||||
struct drm_exynos_ioctl_ipp_commit {
|
||||
__u32 ipp_id;
|
||||
__u32 flags;
|
||||
__u32 reserved;
|
||||
__u32 params_size;
|
||||
__u64 params_ptr;
|
||||
__u64 user_data;
|
||||
};
|
||||
|
||||
#define DRM_EXYNOS_GEM_CREATE 0x00
|
||||
#define DRM_EXYNOS_GEM_MAP 0x01
|
||||
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
|
||||
#define DRM_EXYNOS_GEM_GET 0x04
|
||||
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
|
||||
|
||||
/* G2D */
|
||||
#define DRM_EXYNOS_G2D_GET_VER 0x20
|
||||
#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21
|
||||
#define DRM_EXYNOS_G2D_EXEC 0x22
|
||||
|
||||
/* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */
|
||||
/* IPP - Image Post Processing */
|
||||
#define DRM_EXYNOS_IPP_GET_RESOURCES 0x40
|
||||
#define DRM_EXYNOS_IPP_GET_CAPS 0x41
|
||||
#define DRM_EXYNOS_IPP_GET_LIMITS 0x42
|
||||
#define DRM_EXYNOS_IPP_COMMIT 0x43
|
||||
|
||||
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
|
||||
#define DRM_IOCTL_EXYNOS_GEM_MAP DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_GEM_MAP, struct drm_exynos_gem_map)
|
||||
#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
|
||||
|
||||
#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
|
||||
|
||||
#define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver)
|
||||
#define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist)
|
||||
#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
|
||||
|
||||
#define DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_IPP_GET_RESOURCES, \
|
||||
struct drm_exynos_ioctl_ipp_get_res)
|
||||
#define DRM_IOCTL_EXYNOS_IPP_GET_CAPS DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_IPP_GET_CAPS, struct drm_exynos_ioctl_ipp_get_caps)
|
||||
#define DRM_IOCTL_EXYNOS_IPP_GET_LIMITS DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_IPP_GET_LIMITS, \
|
||||
struct drm_exynos_ioctl_ipp_get_limits)
|
||||
#define DRM_IOCTL_EXYNOS_IPP_COMMIT DRM_IOWR(DRM_COMMAND_BASE + \
|
||||
DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
|
||||
|
||||
/* Exynos specific events */
|
||||
#define DRM_EXYNOS_G2D_EVENT 0x80000000
|
||||
#define DRM_EXYNOS_IPP_EVENT 0x80000002
|
||||
|
||||
struct drm_exynos_g2d_event {
|
||||
struct drm_event base;
|
||||
__u64 user_data;
|
||||
__u32 tv_sec;
|
||||
__u32 tv_usec;
|
||||
__u32 cmdlist_no;
|
||||
__u32 reserved;
|
||||
};
|
||||
|
||||
struct drm_exynos_ipp_event {
|
||||
struct drm_event base;
|
||||
__u64 user_data;
|
||||
__u32 tv_sec;
|
||||
__u32 tv_usec;
|
||||
__u32 ipp_id;
|
||||
__u32 sequence;
|
||||
__u64 reserved;
|
||||
};
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _EXYNOS_DRM_H_ */
|
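The IPP query ioctls in this header follow the two-pass pattern spelled out in the field comments: call once with a NULL array pointer so the driver fills in the count, then call again with a buffer of that size. An illustrative sketch, not part of the commit; it assumes the header is reachable as "exynos_drm.h" and fd is an open exynos DRM node.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "exynos_drm.h"

/* Return a malloc()ed array of IPP module ids, or NULL on error. */
static uint32_t *exynos_ipp_list(int fd, uint32_t *count)
{
	struct drm_exynos_ioctl_ipp_get_res res = { 0 };
	uint32_t *ids;

	/* First pass: ipp_id_ptr is NULL, the driver only sets count_ipps. */
	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res))
		return NULL;

	ids = calloc(res.count_ipps, sizeof(*ids));
	if (!ids)
		return NULL;

	/* Second pass: count_ipps now tells the driver the array size. */
	res.ipp_id_ptr = (uintptr_t)ids;
	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res)) {
		free(ids);
		return NULL;
	}

	*count = res.count_ipps;
	return ids;
}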
drm/habanalabs_accel.h (new file, 2233 lines): diff suppressed because it is too large

drm/i810_drm.h (new file, 292 lines)
@@ -0,0 +1,292 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
#ifndef _I810_DRM_H_
|
||||
#define _I810_DRM_H_
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* WARNING: These defines must be the same as what the Xserver uses.
|
||||
* if you change them, you must change the defines in the Xserver.
|
||||
*/
|
||||
|
||||
#ifndef _I810_DEFINES_
|
||||
#define _I810_DEFINES_
|
||||
|
||||
#define I810_DMA_BUF_ORDER 12
|
||||
#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
|
||||
#define I810_DMA_BUF_NR 256
|
||||
#define I810_NR_SAREA_CLIPRECTS 8
|
||||
|
||||
/* Each region is a minimum of 64k, and there are at most 64 of them.
|
||||
*/
|
||||
#define I810_NR_TEX_REGIONS 64
|
||||
#define I810_LOG_MIN_TEX_REGION_SIZE 16
|
||||
#endif
|
||||
|
||||
#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
|
||||
#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
|
||||
#define I810_UPLOAD_CTX 0x4
|
||||
#define I810_UPLOAD_BUFFERS 0x8
|
||||
#define I810_UPLOAD_TEX0 0x10
|
||||
#define I810_UPLOAD_TEX1 0x20
|
||||
#define I810_UPLOAD_CLIPRECTS 0x40
|
||||
|
||||
/* Indices into buf.Setup where various bits of state are mirrored per
|
||||
* context and per buffer. These can be fired at the card as a unit,
|
||||
* or in a piecewise fashion as required.
|
||||
*/
|
||||
|
||||
/* Destbuffer state
|
||||
* - backbuffer linear offset and pitch -- invarient in the current dri
|
||||
* - zbuffer linear offset and pitch -- also invarient
|
||||
* - drawing origin in back and depth buffers.
|
||||
*
|
||||
* Keep the depth/back buffer state here to accommodate private buffers
|
||||
* in the future.
|
||||
*/
|
||||
#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
|
||||
#define I810_DESTREG_DI1 1
|
||||
#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
|
||||
#define I810_DESTREG_DV1 3
|
||||
#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
|
||||
#define I810_DESTREG_DR1 5
|
||||
#define I810_DESTREG_DR2 6
|
||||
#define I810_DESTREG_DR3 7
|
||||
#define I810_DESTREG_DR4 8
|
||||
#define I810_DEST_SETUP_SIZE 10
|
||||
|
||||
/* Context state
|
||||
*/
|
||||
#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
|
||||
#define I810_CTXREG_CF1 1
|
||||
#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
|
||||
#define I810_CTXREG_ST1 3
|
||||
#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
|
||||
#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
|
||||
#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
|
||||
#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
|
||||
#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
|
||||
#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
|
||||
#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
|
||||
#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
|
||||
#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
|
||||
#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
|
||||
#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
|
||||
#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
|
||||
#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
|
||||
#define I810_CTXREG_PV 17 /* GFX_OP_PV_RULE -- Invarient! */
|
||||
#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
|
||||
#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
|
||||
#define I810_CTX_SETUP_SIZE 20
|
||||
|
||||
/* Texture state (per tex unit)
|
||||
*/
|
||||
#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
|
||||
#define I810_TEXREG_MI1 1
|
||||
#define I810_TEXREG_MI2 2
|
||||
#define I810_TEXREG_MI3 3
|
||||
#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
|
||||
#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
|
||||
#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
|
||||
#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
|
||||
#define I810_TEX_SETUP_SIZE 8
|
||||
|
||||
/* Flags for clear ioctl
|
||||
*/
|
||||
#define I810_FRONT 0x1
|
||||
#define I810_BACK 0x2
|
||||
#define I810_DEPTH 0x4
|
||||
|
||||
typedef enum _drm_i810_init_func {
|
||||
I810_INIT_DMA = 0x01,
|
||||
I810_CLEANUP_DMA = 0x02,
|
||||
I810_INIT_DMA_1_4 = 0x03
|
||||
} drm_i810_init_func_t;
|
||||
|
||||
/* This is the init structure after v1.2 */
|
||||
typedef struct _drm_i810_init {
|
||||
drm_i810_init_func_t func;
|
||||
unsigned int mmio_offset;
|
||||
unsigned int buffers_offset;
|
||||
int sarea_priv_offset;
|
||||
unsigned int ring_start;
|
||||
unsigned int ring_end;
|
||||
unsigned int ring_size;
|
||||
unsigned int front_offset;
|
||||
unsigned int back_offset;
|
||||
unsigned int depth_offset;
|
||||
unsigned int overlay_offset;
|
||||
unsigned int overlay_physical;
|
||||
unsigned int w;
|
||||
unsigned int h;
|
||||
unsigned int pitch;
|
||||
unsigned int pitch_bits;
|
||||
} drm_i810_init_t;
|
||||
|
||||
/* This is the init structure prior to v1.2 */
|
||||
typedef struct _drm_i810_pre12_init {
|
||||
drm_i810_init_func_t func;
|
||||
unsigned int mmio_offset;
|
||||
unsigned int buffers_offset;
|
||||
int sarea_priv_offset;
|
||||
unsigned int ring_start;
|
||||
unsigned int ring_end;
|
||||
unsigned int ring_size;
|
||||
unsigned int front_offset;
|
||||
unsigned int back_offset;
|
||||
unsigned int depth_offset;
|
||||
unsigned int w;
|
||||
unsigned int h;
|
||||
unsigned int pitch;
|
||||
unsigned int pitch_bits;
|
||||
} drm_i810_pre12_init_t;
|
||||
|
||||
/* Warning: If you change the SAREA structure you must change the Xserver
|
||||
* structure as well */
|
||||
|
||||
typedef struct _drm_i810_tex_region {
|
||||
unsigned char next, prev; /* indices to form a circular LRU */
|
||||
unsigned char in_use; /* owned by a client, or free? */
|
||||
int age; /* tracked by clients to update local LRU's */
|
||||
} drm_i810_tex_region_t;
|
||||
|
||||
typedef struct _drm_i810_sarea {
|
||||
unsigned int ContextState[I810_CTX_SETUP_SIZE];
|
||||
unsigned int BufferState[I810_DEST_SETUP_SIZE];
|
||||
unsigned int TexState[2][I810_TEX_SETUP_SIZE];
|
||||
unsigned int dirty;
|
||||
|
||||
unsigned int nbox;
|
||||
struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
|
||||
|
||||
/* Maintain an LRU of contiguous regions of texture space. If
|
||||
* you think you own a region of texture memory, and it has an
|
||||
* age different to the one you set, then you are mistaken and
|
||||
* it has been stolen by another client. If global texAge
|
||||
* hasn't changed, there is no need to walk the list.
|
||||
*
|
||||
* These regions can be used as a proxy for the fine-grained
|
||||
* texture information of other clients - by maintaining them
|
||||
* in the same lru which is used to age their own textures,
|
||||
* clients have an approximate lru for the whole of global
|
||||
* texture space, and can make informed decisions as to which
|
||||
* areas to kick out. There is no need to choose whether to
|
||||
* kick out your own texture or someone else's - simply eject
|
||||
* them all in LRU order.
|
||||
*/
|
||||
|
||||
drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
|
||||
/* Last elt is sentinal */
|
||||
int texAge; /* last time texture was uploaded */
|
||||
int last_enqueue; /* last time a buffer was enqueued */
|
||||
int last_dispatch; /* age of the most recently dispatched buffer */
|
||||
int last_quiescent; /* */
|
||||
int ctxOwner; /* last context to upload state */
|
||||
|
||||
int vertex_prim;
|
||||
|
||||
int pf_enabled; /* is pageflipping allowed? */
|
||||
int pf_active;
|
||||
int pf_current_page; /* which buffer is being displayed? */
|
||||
} drm_i810_sarea_t;
|
||||
|
||||
/* WARNING: If you change any of these defines, make sure to change the
|
||||
* defines in the Xserver file (xf86drmMga.h)
|
||||
*/
|
||||
|
||||
/* i810 specific ioctls
|
||||
* The device specific ioctl range is 0x40 to 0x79.
|
||||
*/
|
||||
#define DRM_I810_INIT 0x00
|
||||
#define DRM_I810_VERTEX 0x01
|
||||
#define DRM_I810_CLEAR 0x02
|
||||
#define DRM_I810_FLUSH 0x03
|
||||
#define DRM_I810_GETAGE 0x04
|
||||
#define DRM_I810_GETBUF 0x05
|
||||
#define DRM_I810_SWAP 0x06
|
||||
#define DRM_I810_COPY 0x07
|
||||
#define DRM_I810_DOCOPY 0x08
|
||||
#define DRM_I810_OV0INFO 0x09
|
||||
#define DRM_I810_FSTATUS 0x0a
|
||||
#define DRM_I810_OV0FLIP 0x0b
|
||||
#define DRM_I810_MC 0x0c
|
||||
#define DRM_I810_RSTATUS 0x0d
|
||||
#define DRM_I810_FLIP 0x0e
|
||||
|
||||
#define DRM_IOCTL_I810_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I810_INIT, drm_i810_init_t)
|
||||
#define DRM_IOCTL_I810_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I810_VERTEX, drm_i810_vertex_t)
|
||||
#define DRM_IOCTL_I810_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I810_CLEAR, drm_i810_clear_t)
|
||||
#define DRM_IOCTL_I810_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_I810_FLUSH)
|
||||
#define DRM_IOCTL_I810_GETAGE DRM_IO( DRM_COMMAND_BASE + DRM_I810_GETAGE)
|
||||
#define DRM_IOCTL_I810_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I810_GETBUF, drm_i810_dma_t)
|
||||
#define DRM_IOCTL_I810_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_I810_SWAP)
|
||||
#define DRM_IOCTL_I810_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I810_COPY, drm_i810_copy_t)
|
||||
#define DRM_IOCTL_I810_DOCOPY DRM_IO( DRM_COMMAND_BASE + DRM_I810_DOCOPY)
|
||||
#define DRM_IOCTL_I810_OV0INFO DRM_IOR( DRM_COMMAND_BASE + DRM_I810_OV0INFO, drm_i810_overlay_t)
|
||||
#define DRM_IOCTL_I810_FSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FSTATUS)
|
||||
#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_OV0FLIP)
|
||||
#define DRM_IOCTL_I810_MC DRM_IOW( DRM_COMMAND_BASE + DRM_I810_MC, drm_i810_mc_t)
|
||||
#define DRM_IOCTL_I810_RSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_RSTATUS)
|
||||
#define DRM_IOCTL_I810_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FLIP)
|
||||
|
||||
typedef struct _drm_i810_clear {
|
||||
int clear_color;
|
||||
int clear_depth;
|
||||
int flags;
|
||||
} drm_i810_clear_t;
|
||||
|
||||
/* These may be placeholders if we have more cliprects than
|
||||
* I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
|
||||
* false, indicating that the buffer will be dispatched again with a
|
||||
* new set of cliprects.
|
||||
*/
|
||||
typedef struct _drm_i810_vertex {
|
||||
int idx; /* buffer index */
|
||||
int used; /* nr bytes in use */
|
||||
int discard; /* client is finished with the buffer? */
|
||||
} drm_i810_vertex_t;
|
||||
|
||||
typedef struct _drm_i810_copy_t {
|
||||
int idx; /* buffer index */
|
||||
int used; /* nr bytes in use */
|
||||
void *address; /* Address to copy from */
|
||||
} drm_i810_copy_t;
|
||||
|
||||
#define PR_TRIANGLES (0x0<<18)
|
||||
#define PR_TRISTRIP_0 (0x1<<18)
|
||||
#define PR_TRISTRIP_1 (0x2<<18)
|
||||
#define PR_TRIFAN (0x3<<18)
|
||||
#define PR_POLYGON (0x4<<18)
|
||||
#define PR_LINES (0x5<<18)
|
||||
#define PR_LINESTRIP (0x6<<18)
|
||||
#define PR_RECTS (0x7<<18)
|
||||
#define PR_MASK (0x7<<18)
|
||||
|
||||
typedef struct drm_i810_dma {
|
||||
void *virtual;
|
||||
int request_idx;
|
||||
int request_size;
|
||||
int granted;
|
||||
} drm_i810_dma_t;
|
||||
|
||||
typedef struct _drm_i810_overlay_t {
|
||||
unsigned int offset; /* Address of the Overlay Regs */
|
||||
unsigned int physical;
|
||||
} drm_i810_overlay_t;
|
||||
|
||||
typedef struct _drm_i810_mc {
|
||||
int idx; /* buffer index */
|
||||
int used; /* nr bytes in use */
|
||||
int num_blocks; /* number of GFXBlocks */
|
||||
int *length; /* List of lengths for GFXBlocks (FUTURE) */
|
||||
unsigned int last_render; /* Last Render Request */
|
||||
} drm_i810_mc_t;
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _I810_DRM_H_ */
|
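The texture-region LRU described in the SAREA comment above boils down to a single comparison on the client side. A hypothetical helper, illustrative only and not part of any API in this header:

/* Illustrative only: has the region we stamped with my_age been stolen? */
static int i810_region_stolen(const drm_i810_sarea_t *sarea,
			      int region, int my_age)
{
	/* If another client re-used the region, its age no longer matches
	 * the value we wrote when we claimed it. */
	return sarea->texList[region].age != my_age;
}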
drm/i915_drm.h (new file, 3724 lines): diff suppressed because it is too large

drm/ivpu_accel.h (new file, 306 lines)
@@ -0,0 +1,306 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __UAPI_IVPU_DRM_H__
#define __UAPI_IVPU_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IVPU_DRIVER_MAJOR 1
#define DRM_IVPU_DRIVER_MINOR 0

#define DRM_IVPU_GET_PARAM 0x00
#define DRM_IVPU_SET_PARAM 0x01
#define DRM_IVPU_BO_CREATE 0x02
#define DRM_IVPU_BO_INFO 0x03
#define DRM_IVPU_SUBMIT 0x05
#define DRM_IVPU_BO_WAIT 0x06

#define DRM_IOCTL_IVPU_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_SET_PARAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_BO_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)

#define DRM_IOCTL_IVPU_BO_INFO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)

#define DRM_IOCTL_IVPU_SUBMIT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)

#define DRM_IOCTL_IVPU_BO_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)

/**
 * DOC: contexts
 *
 * VPU contexts have private virtual address space, job queues and priority.
 * Each context is identified by an unique ID. Context is created on open().
 */

#define DRM_IVPU_PARAM_DEVICE_ID 0
#define DRM_IVPU_PARAM_DEVICE_REVISION 1
#define DRM_IVPU_PARAM_PLATFORM_TYPE 2
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
#define DRM_IVPU_PARAM_NUM_CONTEXTS 4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6
#define DRM_IVPU_PARAM_CONTEXT_ID 7
#define DRM_IVPU_PARAM_FW_API_VERSION 8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12

#define DRM_IVPU_PLATFORM_TYPE_SILICON 0

#define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3

/**
 * struct drm_ivpu_param - Get/Set VPU parameters
 */
struct drm_ivpu_param {
	/**
	 * @param:
	 *
	 * Supported params:
	 *
	 * %DRM_IVPU_PARAM_DEVICE_ID:
	 * PCI Device ID of the VPU device (read-only)
	 *
	 * %DRM_IVPU_PARAM_DEVICE_REVISION:
	 * VPU device revision (read-only)
	 *
	 * %DRM_IVPU_PARAM_PLATFORM_TYPE:
	 * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or device specific
	 * platform type when executing on a simulator or emulator (read-only)
	 *
	 * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
	 * Current PLL frequency (read-only)
	 *
	 * %DRM_IVPU_PARAM_NUM_CONTEXTS:
	 * Maximum number of simultaneously existing contexts (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
	 * Lowest VPU virtual address available in the current context (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
	 * Value of current context scheduling priority (read-write).
	 * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_ID:
	 * Current context ID, always greater than 0 (read-only)
	 *
	 * %DRM_IVPU_PARAM_FW_API_VERSION:
	 * Firmware API version array (read-only)
	 *
	 * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
	 * Heartbeat value from an engine (read-only).
	 * Engine ID (i.e. DRM_IVPU_ENGINE_COMPUTE) is given via index.
	 *
	 * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
	 * Device-unique inference ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_TILE_CONFIG:
	 * VPU tile configuration (read-only)
	 *
	 * %DRM_IVPU_PARAM_SKU:
	 * VPU SKU ID (read-only)
	 *
	 */
	__u32 param;

	/** @index: Index for params that have multiple instances */
	__u32 index;

	/** @value: Param value */
	__u64 value;
};

#define DRM_IVPU_BO_HIGH_MEM 0x00000001
#define DRM_IVPU_BO_MAPPABLE 0x00000002

#define DRM_IVPU_BO_CACHED 0x00000000
#define DRM_IVPU_BO_UNCACHED 0x00010000
#define DRM_IVPU_BO_WC 0x00020000
#define DRM_IVPU_BO_CACHE_MASK 0x00030000

#define DRM_IVPU_BO_FLAGS \
	(DRM_IVPU_BO_HIGH_MEM | \
	 DRM_IVPU_BO_MAPPABLE | \
	 DRM_IVPU_BO_CACHE_MASK)

/**
 * struct drm_ivpu_bo_create - Create BO backed by SHMEM
 *
 * Create GEM buffer object allocated in SHMEM memory.
 */
struct drm_ivpu_bo_create {
	/** @size: The size in bytes of the allocated memory */
	__u64 size;

	/**
	 * @flags:
	 *
	 * Supported flags:
	 *
	 * %DRM_IVPU_BO_HIGH_MEM:
	 *
	 * Allocate VPU address from >4GB range.
	 * Buffer object with vpu address >4GB can be always accessed by the
	 * VPU DMA engine, but some HW generation may not be able to access
	 * this memory from then firmware running on the VPU management processor.
	 * Suitable for input, output and some scratch buffers.
	 *
	 * %DRM_IVPU_BO_MAPPABLE:
	 *
	 * Buffer object can be mapped using mmap().
	 *
	 * %DRM_IVPU_BO_CACHED:
	 *
	 * Allocated BO will be cached on host side (WB) and snooped on the VPU side.
	 * This is the default caching mode.
	 *
	 * %DRM_IVPU_BO_UNCACHED:
	 *
	 * Allocated BO will not be cached on host side nor snooped on the VPU side.
	 *
	 * %DRM_IVPU_BO_WC:
	 *
	 * Allocated BO will use write combining buffer for writes but reads will be
	 * uncached.
	 */
	__u32 flags;

	/** @handle: Returned GEM object handle */
	__u32 handle;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;
};

/**
 * struct drm_ivpu_bo_info - Query buffer object info
 */
struct drm_ivpu_bo_info {
	/** @handle: Handle of the queried BO */
	__u32 handle;

	/** @flags: Returned flags used to create the BO */
	__u32 flags;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;

	/**
	 * @mmap_offset:
	 *
	 * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
	 */
	__u64 mmap_offset;

	/** @size: Returned GEM object size, aligned to PAGE_SIZE */
	__u64 size;
};

/* drm_ivpu_submit engines */
#define DRM_IVPU_ENGINE_COMPUTE 0
#define DRM_IVPU_ENGINE_COPY 1

/**
 * struct drm_ivpu_submit - Submit commands to the VPU
 *
 * Execute a single command buffer on a given VPU engine.
 * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
 *
 * User space may wait on job completion using %DRM_IVPU_BO_WAIT ioctl.
 */
struct drm_ivpu_submit {
	/**
	 * @buffers_ptr:
	 *
	 * A pointer to an u32 array of GEM handles of the BOs required for this job.
	 * The number of elements in the array must be equal to the value given by @buffer_count.
	 *
	 * The first BO is the command buffer. The rest of array has to contain all
	 * BOs referenced from the command buffer.
	 */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in the @buffers_ptr */
	__u32 buffer_count;

	/**
	 * @engine: Select the engine this job should be executed on
	 *
	 * %DRM_IVPU_ENGINE_COMPUTE:
	 *
	 * Performs Deep Learning Neural Compute Inference Operations
	 *
	 * %DRM_IVPU_ENGINE_COPY:
	 *
	 * Performs memory copy operations to/from system memory allocated for VPU
	 */
	__u32 engine;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/**
	 * @commands_offset:
	 *
	 * Offset inside the first buffer in @buffers_ptr containing commands
	 * to be executed. The offset has to be 8-byte aligned.
	 */
	__u32 commands_offset;
};

/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0

/**
 * struct drm_ivpu_bo_wait - Wait for BO to become inactive
 *
 * Blocks until a given buffer object becomes inactive.
 * With @timeout_ms set to 0 returns immediately.
 */
struct drm_ivpu_bo_wait {
	/** @handle: Handle to the buffer object to be waited on */
	__u32 handle;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
	__s64 timeout_ns;

	/**
	 * @job_status:
	 *
	 * Job status code which is updated after the job is completed.
	 * &DRM_IVPU_JOB_STATUS_SUCCESS or device specific error otherwise.
	 * Valid only if @handle points to a command buffer.
	 */
	__u32 job_status;

	/** @pad: Padding - must be zero */
	__u32 pad;
};

#if defined(__cplusplus)
}
#endif

#endif /* __UAPI_IVPU_DRM_H__ */
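Putting the three structures above together, a job is: create a command buffer, submit it as the first (and here only) entry of the handle array, then block on that same handle. A minimal illustrative sketch, not part of the commit; fd is assumed to be an open ivpu accel node and the header reachable as "ivpu_accel.h".

#include <stdint.h>
#include <sys/ioctl.h>
#include "ivpu_accel.h"

/* Submit one command buffer and wait for it; returns the job status. */
static int ivpu_run(int fd, uint32_t cmdbuf_handle, int64_t timeout_ns)
{
	uint32_t handles[1] = { cmdbuf_handle };
	struct drm_ivpu_submit submit = {
		.buffers_ptr = (uintptr_t)handles,
		.buffer_count = 1,
		.engine = DRM_IVPU_ENGINE_COMPUTE,
		.commands_offset = 0,	/* commands start at the buffer head */
	};
	struct drm_ivpu_bo_wait wait = {
		.handle = cmdbuf_handle,
		.timeout_ns = timeout_ns,
	};

	if (ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &submit))
		return -1;
	if (ioctl(fd, DRM_IOCTL_IVPU_BO_WAIT, &wait))
		return -1;

	return wait.job_status;	/* DRM_IVPU_JOB_STATUS_SUCCESS on success */
}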

drm/lima_drm.h (new file, 176 lines)
@ -0,0 +1,176 @@
|
||||
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
|
||||
/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
|
||||
|
||||
#ifndef __LIMA_DRM_H__
|
||||
#define __LIMA_DRM_H__
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
enum drm_lima_param_gpu_id {
|
||||
DRM_LIMA_PARAM_GPU_ID_UNKNOWN,
|
||||
DRM_LIMA_PARAM_GPU_ID_MALI400,
|
||||
DRM_LIMA_PARAM_GPU_ID_MALI450,
|
||||
};
|
||||
|
||||
enum drm_lima_param {
|
||||
DRM_LIMA_PARAM_GPU_ID,
|
||||
DRM_LIMA_PARAM_NUM_PP,
|
||||
DRM_LIMA_PARAM_GP_VERSION,
|
||||
DRM_LIMA_PARAM_PP_VERSION,
|
||||
};
|
||||
|
||||
/**
|
||||
* get various information of the GPU
|
||||
*/
|
||||
struct drm_lima_get_param {
|
||||
__u32 param; /* in, value in enum drm_lima_param */
|
||||
__u32 pad; /* pad, must be zero */
|
||||
__u64 value; /* out, parameter value */
|
||||
};
|
||||
|
||||
/*
|
||||
* heap buffer dynamically increase backup memory size when GP task fail
|
||||
* due to lack of heap memory. size field of heap buffer is an up bound of
|
||||
* the backup memory which can be set to a fairly large value.
|
||||
*/
|
||||
#define LIMA_BO_FLAG_HEAP (1 << 0)
|
||||
|
||||
/**
|
||||
* create a buffer for used by GPU
|
||||
*/
|
||||
struct drm_lima_gem_create {
|
||||
__u32 size; /* in, buffer size */
|
||||
__u32 flags; /* in, buffer flags */
|
||||
__u32 handle; /* out, GEM buffer handle */
|
||||
__u32 pad; /* pad, must be zero */
|
||||
};
|
||||
|
||||
/**
|
||||
* get information of a buffer
|
||||
*/
|
||||
struct drm_lima_gem_info {
|
||||
__u32 handle; /* in, GEM buffer handle */
|
||||
__u32 va; /* out, virtual address mapped into GPU MMU */
|
||||
__u64 offset; /* out, used to mmap this buffer to CPU */
|
||||
};
|
||||
|
||||
#define LIMA_SUBMIT_BO_READ 0x01
|
||||
#define LIMA_SUBMIT_BO_WRITE 0x02
|
||||
|
||||
/* buffer information used by one task */
|
||||
struct drm_lima_gem_submit_bo {
|
||||
__u32 handle; /* in, GEM buffer handle */
|
||||
__u32 flags; /* in, buffer read/write by GPU */
|
||||
};
|
||||
|
||||
#define LIMA_GP_FRAME_REG_NUM 6
|
||||
|
||||
/* frame used to setup GP for each task */
|
||||
struct drm_lima_gp_frame {
|
||||
__u32 frame[LIMA_GP_FRAME_REG_NUM];
|
||||
};
|
||||
|
||||
#define LIMA_PP_FRAME_REG_NUM 23
|
||||
#define LIMA_PP_WB_REG_NUM 12
|
||||
|
||||
/* frame used to setup mali400 GPU PP for each task */
|
||||
struct drm_lima_m400_pp_frame {
|
||||
__u32 frame[LIMA_PP_FRAME_REG_NUM];
|
||||
__u32 num_pp;
|
||||
__u32 wb[3 * LIMA_PP_WB_REG_NUM];
|
||||
__u32 plbu_array_address[4];
|
||||
__u32 fragment_stack_address[4];
|
||||
};
|
||||
|
||||
/* frame used to setup mali450 GPU PP for each task */
|
||||
struct drm_lima_m450_pp_frame {
|
||||
__u32 frame[LIMA_PP_FRAME_REG_NUM];
|
||||
__u32 num_pp;
|
||||
__u32 wb[3 * LIMA_PP_WB_REG_NUM];
|
||||
__u32 use_dlbu;
|
||||
__u32 _pad;
|
||||
union {
|
||||
__u32 plbu_array_address[8];
|
||||
__u32 dlbu_regs[4];
|
||||
};
|
||||
__u32 fragment_stack_address[8];
|
||||
};
|
||||
|
||||
#define LIMA_PIPE_GP 0x00
|
||||
#define LIMA_PIPE_PP 0x01
|
||||
|
||||
#define LIMA_SUBMIT_FLAG_EXPLICIT_FENCE (1 << 0)
|
||||
|
||||
/**
|
||||
* submit a task to the GPU
|
||||
*
|
||||
* Userspace can always merge multiple sync_files and drm_syncobjs
|
||||
* into one drm_syncobj as in_sync[0], but we reserve
|
||||
* in_sync[1] for another task's out_sync to avoid the
|
||||
* export/import/merge pass when using explicit sync.
|
||||
*/
|
||||
struct drm_lima_gem_submit {
|
||||
__u32 ctx; /* in, context handle task is submitted to */
|
||||
__u32 pipe; /* in, which pipe to use, GP/PP */
|
||||
__u32 nr_bos; /* in, array length of bos field */
|
||||
__u32 frame_size; /* in, size of frame field */
|
||||
__u64 bos; /* in, array of drm_lima_gem_submit_bo */
|
||||
__u64 frame; /* in, GP/PP frame */
|
||||
__u32 flags; /* in, submit flags */
|
||||
__u32 out_sync; /* in, drm_syncobj handle used to wait for task completion after submission */
|
||||
__u32 in_sync[2]; /* in, drm_syncobj handles to wait on before starting this task */
|
||||
};
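/*
 * Illustrative sketch, not part of the UAPI: filling drm_lima_gem_submit for
 * a single GP task.  The context id, buffer handle, syncobj handle and frame
 * contents are placeholders supplied by the caller; building the GP frame
 * registers is hardware-specific and out of scope here.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "lima_drm.h"

static int lima_submit_gp(int fd, __u32 ctx, __u32 bo_handle,
                          const struct drm_lima_gp_frame *frame,
                          __u32 out_sync)
{
        struct drm_lima_gem_submit_bo bo = {
                .handle = bo_handle,
                .flags  = LIMA_SUBMIT_BO_WRITE,
        };
        struct drm_lima_gem_submit submit;

        memset(&submit, 0, sizeof(submit));
        submit.ctx        = ctx;                /* from DRM_IOCTL_LIMA_CTX_CREATE */
        submit.pipe       = LIMA_PIPE_GP;
        submit.nr_bos     = 1;
        submit.bos        = (uintptr_t)&bo;
        submit.frame      = (uintptr_t)frame;
        submit.frame_size = sizeof(*frame);
        submit.out_sync   = out_sync;           /* left 0 here if unused */

        return ioctl(fd, DRM_IOCTL_LIMA_GEM_SUBMIT, &submit);
}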
|
||||
|
||||
#define LIMA_GEM_WAIT_READ 0x01
|
||||
#define LIMA_GEM_WAIT_WRITE 0x02
|
||||
|
||||
/**
|
||||
* wait for pending GPU tasks on a buffer to finish
|
||||
*/
|
||||
struct drm_lima_gem_wait {
|
||||
__u32 handle; /* in, GEM buffer handle */
|
||||
__u32 op; /* in, whether the CPU wants to read and/or write this buffer */
|
||||
__s64 timeout_ns; /* in, wait timeout in absolute time */
|
||||
};
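/*
 * Illustrative sketch, not part of the UAPI: timeout_ns above is an absolute
 * timestamp (CLOCK_MONOTONIC based in practice), so callers add their
 * relative timeout to the current time.  The one-second value is only an
 * example.
 */
#include <time.h>
#include <sys/ioctl.h>
#include "lima_drm.h"

static int lima_wait_write_1s(int fd, __u32 handle)
{
        struct timespec now;
        struct drm_lima_gem_wait wait = {
                .handle = handle,
                .op = LIMA_GEM_WAIT_WRITE,
        };

        clock_gettime(CLOCK_MONOTONIC, &now);
        wait.timeout_ns = (__s64)now.tv_sec * 1000000000ll +
                          now.tv_nsec + 1000000000ll;   /* now + 1s */

        return ioctl(fd, DRM_IOCTL_LIMA_GEM_WAIT, &wait);
}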
|
||||
|
||||
/**
|
||||
* create a context
|
||||
*/
|
||||
struct drm_lima_ctx_create {
|
||||
__u32 id; /* out, context handle */
|
||||
__u32 _pad; /* pad, must be zero */
|
||||
};
|
||||
|
||||
/**
|
||||
* free a context
|
||||
*/
|
||||
struct drm_lima_ctx_free {
|
||||
__u32 id; /* in, context handle */
|
||||
__u32 _pad; /* pad, must be zero */
|
||||
};
|
||||
|
||||
#define DRM_LIMA_GET_PARAM 0x00
|
||||
#define DRM_LIMA_GEM_CREATE 0x01
|
||||
#define DRM_LIMA_GEM_INFO 0x02
|
||||
#define DRM_LIMA_GEM_SUBMIT 0x03
|
||||
#define DRM_LIMA_GEM_WAIT 0x04
|
||||
#define DRM_LIMA_CTX_CREATE 0x05
|
||||
#define DRM_LIMA_CTX_FREE 0x06
|
||||
|
||||
#define DRM_IOCTL_LIMA_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GET_PARAM, struct drm_lima_get_param)
|
||||
#define DRM_IOCTL_LIMA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_CREATE, struct drm_lima_gem_create)
|
||||
#define DRM_IOCTL_LIMA_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_INFO, struct drm_lima_gem_info)
|
||||
#define DRM_IOCTL_LIMA_GEM_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_SUBMIT, struct drm_lima_gem_submit)
|
||||
#define DRM_IOCTL_LIMA_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_WAIT, struct drm_lima_gem_wait)
|
||||
#define DRM_IOCTL_LIMA_CTX_CREATE DRM_IOR(DRM_COMMAND_BASE + DRM_LIMA_CTX_CREATE, struct drm_lima_ctx_create)
|
||||
#define DRM_IOCTL_LIMA_CTX_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_CTX_FREE, struct drm_lima_ctx_free)
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __LIMA_DRM_H__ */
|
429
drm/mga_drm.h
Normal file
@ -0,0 +1,429 @@
|
||||
/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
|
||||
* Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Jeff Hartmann <jhartmann@valinux.com>
|
||||
* Keith Whitwell <keith@tungstengraphics.com>
|
||||
*
|
||||
* Rewritten by:
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
#ifndef __MGA_DRM_H__
|
||||
#define __MGA_DRM_H__
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* WARNING: If you change any of these defines, make sure to change the
|
||||
* defines in the Xserver file (mga_sarea.h)
|
||||
*/
|
||||
|
||||
#ifndef __MGA_SAREA_DEFINES__
|
||||
#define __MGA_SAREA_DEFINES__
|
||||
|
||||
/* WARP pipe flags
|
||||
*/
|
||||
#define MGA_F 0x1 /* fog */
|
||||
#define MGA_A 0x2 /* alpha */
|
||||
#define MGA_S 0x4 /* specular */
|
||||
#define MGA_T2 0x8 /* multitexture */
|
||||
|
||||
#define MGA_WARP_TGZ 0
|
||||
#define MGA_WARP_TGZF (MGA_F)
|
||||
#define MGA_WARP_TGZA (MGA_A)
|
||||
#define MGA_WARP_TGZAF (MGA_F|MGA_A)
|
||||
#define MGA_WARP_TGZS (MGA_S)
|
||||
#define MGA_WARP_TGZSF (MGA_S|MGA_F)
|
||||
#define MGA_WARP_TGZSA (MGA_S|MGA_A)
|
||||
#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
|
||||
#define MGA_WARP_T2GZ (MGA_T2)
|
||||
#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
|
||||
#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
|
||||
#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
|
||||
#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
|
||||
#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
|
||||
#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
|
||||
#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
|
||||
|
||||
#define MGA_MAX_G200_PIPES 8 /* no multitex */
|
||||
#define MGA_MAX_G400_PIPES 16
|
||||
#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
|
||||
#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
|
||||
|
||||
#define MGA_CARD_TYPE_G200 1
|
||||
#define MGA_CARD_TYPE_G400 2
|
||||
#define MGA_CARD_TYPE_G450 3 /* not currently used */
|
||||
#define MGA_CARD_TYPE_G550 4
|
||||
|
||||
#define MGA_FRONT 0x1
|
||||
#define MGA_BACK 0x2
|
||||
#define MGA_DEPTH 0x4
|
||||
|
||||
/* What needs to be changed for the current vertex dma buffer?
|
||||
*/
|
||||
#define MGA_UPLOAD_CONTEXT 0x1
|
||||
#define MGA_UPLOAD_TEX0 0x2
|
||||
#define MGA_UPLOAD_TEX1 0x4
|
||||
#define MGA_UPLOAD_PIPE 0x8
|
||||
#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
|
||||
#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
|
||||
#define MGA_UPLOAD_2D 0x40
|
||||
#define MGA_WAIT_AGE 0x80 /* handled client-side */
|
||||
#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
|
||||
#if 0
|
||||
#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
|
||||
quiescent */
|
||||
#endif
|
||||
|
||||
/* 128 buffers of 64k each, total 8 meg.
|
||||
*/
|
||||
#define MGA_BUFFER_SIZE (1 << 16)
|
||||
#define MGA_NUM_BUFFERS 128
|
||||
|
||||
/* Keep these small for testing.
|
||||
*/
|
||||
#define MGA_NR_SAREA_CLIPRECTS 8
|
||||
|
||||
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
|
||||
* regions, subject to a minimum region size of (1<<16) == 64k.
|
||||
*
|
||||
* Clients may subdivide regions internally, but when sharing between
|
||||
* clients, the region size is the minimum granularity.
|
||||
*/
|
||||
|
||||
#define MGA_CARD_HEAP 0
|
||||
#define MGA_AGP_HEAP 1
|
||||
#define MGA_NR_TEX_HEAPS 2
|
||||
#define MGA_NR_TEX_REGIONS 16
|
||||
#define MGA_LOG_MIN_TEX_REGION_SIZE 16
|
||||
|
||||
#define DRM_MGA_IDLE_RETRY 2048
|
||||
|
||||
#endif /* __MGA_SAREA_DEFINES__ */
|
||||
|
||||
/* Setup registers for 3D context
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned int dstorg;
|
||||
unsigned int maccess;
|
||||
unsigned int plnwt;
|
||||
unsigned int dwgctl;
|
||||
unsigned int alphactrl;
|
||||
unsigned int fogcolor;
|
||||
unsigned int wflag;
|
||||
unsigned int tdualstage0;
|
||||
unsigned int tdualstage1;
|
||||
unsigned int fcol;
|
||||
unsigned int stencil;
|
||||
unsigned int stencilctl;
|
||||
} drm_mga_context_regs_t;
|
||||
|
||||
/* Setup registers for 2D, X server
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned int pitch;
|
||||
} drm_mga_server_regs_t;
|
||||
|
||||
/* Setup registers for each texture unit
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned int texctl;
|
||||
unsigned int texctl2;
|
||||
unsigned int texfilter;
|
||||
unsigned int texbordercol;
|
||||
unsigned int texorg;
|
||||
unsigned int texwidth;
|
||||
unsigned int texheight;
|
||||
unsigned int texorg1;
|
||||
unsigned int texorg2;
|
||||
unsigned int texorg3;
|
||||
unsigned int texorg4;
|
||||
} drm_mga_texture_regs_t;
|
||||
|
||||
/* General aging mechanism
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned int head; /* Position of head pointer */
|
||||
unsigned int wrap; /* Primary DMA wrap count */
|
||||
} drm_mga_age_t;
|
||||
|
||||
typedef struct _drm_mga_sarea {
|
||||
/* The channel for communication of state information to the kernel
|
||||
* on firing a vertex dma buffer.
|
||||
*/
|
||||
drm_mga_context_regs_t context_state;
|
||||
drm_mga_server_regs_t server_state;
|
||||
drm_mga_texture_regs_t tex_state[2];
|
||||
unsigned int warp_pipe;
|
||||
unsigned int dirty;
|
||||
unsigned int vertsize;
|
||||
|
||||
/* The current cliprects, or a subset thereof.
|
||||
*/
|
||||
struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
|
||||
unsigned int nbox;
|
||||
|
||||
/* Information about the most recently used 3d drawable. The
|
||||
* client fills in the req_* fields, the server fills in the
|
||||
* exported_ fields and puts the cliprects into boxes, above.
|
||||
*
|
||||
* The client clears the exported_drawable field before
|
||||
* clobbering the boxes data.
|
||||
*/
|
||||
unsigned int req_drawable; /* the X drawable id */
|
||||
unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
|
||||
|
||||
unsigned int exported_drawable;
|
||||
unsigned int exported_index;
|
||||
unsigned int exported_stamp;
|
||||
unsigned int exported_buffers;
|
||||
unsigned int exported_nfront;
|
||||
unsigned int exported_nback;
|
||||
int exported_back_x, exported_front_x, exported_w;
|
||||
int exported_back_y, exported_front_y, exported_h;
|
||||
struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
|
||||
|
||||
/* Counters for aging textures and for client-side throttling.
|
||||
*/
|
||||
unsigned int status[4];
|
||||
unsigned int last_wrap;
|
||||
|
||||
drm_mga_age_t last_frame;
|
||||
unsigned int last_enqueue; /* last time a buffer was enqueued */
|
||||
unsigned int last_dispatch; /* age of the most recently dispatched buffer */
|
||||
unsigned int last_quiescent; /* */
|
||||
|
||||
/* LRU lists for texture memory in agp space and on the card.
|
||||
*/
|
||||
struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
|
||||
unsigned int texAge[MGA_NR_TEX_HEAPS];
|
||||
|
||||
/* Mechanism to validate card state.
|
||||
*/
|
||||
int ctxOwner;
|
||||
} drm_mga_sarea_t;
|
||||
|
||||
/* MGA specific ioctls
|
||||
* The device specific ioctl range is 0x40 to 0x79.
|
||||
*/
|
||||
#define DRM_MGA_INIT 0x00
|
||||
#define DRM_MGA_FLUSH 0x01
|
||||
#define DRM_MGA_RESET 0x02
|
||||
#define DRM_MGA_SWAP 0x03
|
||||
#define DRM_MGA_CLEAR 0x04
|
||||
#define DRM_MGA_VERTEX 0x05
|
||||
#define DRM_MGA_INDICES 0x06
|
||||
#define DRM_MGA_ILOAD 0x07
|
||||
#define DRM_MGA_BLIT 0x08
|
||||
#define DRM_MGA_GETPARAM 0x09
|
||||
|
||||
/* 3.2:
|
||||
* ioctls for operating on fences.
|
||||
*/
|
||||
#define DRM_MGA_SET_FENCE 0x0a
|
||||
#define DRM_MGA_WAIT_FENCE 0x0b
|
||||
#define DRM_MGA_DMA_BOOTSTRAP 0x0c
|
||||
|
||||
#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
|
||||
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
|
||||
#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
|
||||
#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
|
||||
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
|
||||
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
|
||||
#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
|
||||
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
|
||||
#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
|
||||
#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
|
||||
#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, __u32)
|
||||
#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, __u32)
|
||||
#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
|
||||
|
||||
typedef struct _drm_mga_warp_index {
|
||||
int installed;
|
||||
unsigned long phys_addr;
|
||||
int size;
|
||||
} drm_mga_warp_index_t;
|
||||
|
||||
typedef struct drm_mga_init {
|
||||
enum {
|
||||
MGA_INIT_DMA = 0x01,
|
||||
MGA_CLEANUP_DMA = 0x02
|
||||
} func;
|
||||
|
||||
unsigned long sarea_priv_offset;
|
||||
|
||||
__struct_group(/* no tag */, always32bit, /* no attrs */,
|
||||
int chipset;
|
||||
int sgram;
|
||||
|
||||
unsigned int maccess;
|
||||
|
||||
unsigned int fb_cpp;
|
||||
unsigned int front_offset, front_pitch;
|
||||
unsigned int back_offset, back_pitch;
|
||||
|
||||
unsigned int depth_cpp;
|
||||
unsigned int depth_offset, depth_pitch;
|
||||
|
||||
unsigned int texture_offset[MGA_NR_TEX_HEAPS];
|
||||
unsigned int texture_size[MGA_NR_TEX_HEAPS];
|
||||
);
|
||||
|
||||
unsigned long fb_offset;
|
||||
unsigned long mmio_offset;
|
||||
unsigned long status_offset;
|
||||
unsigned long warp_offset;
|
||||
unsigned long primary_offset;
|
||||
unsigned long buffers_offset;
|
||||
} drm_mga_init_t;
|
||||
|
||||
typedef struct drm_mga_dma_bootstrap {
|
||||
/**
|
||||
* \name AGP texture region
|
||||
*
|
||||
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
|
||||
* be filled in with the actual AGP texture settings.
|
||||
*
|
||||
* \warning
|
||||
* If these fields are non-zero, but dma_mga_dma_bootstrap::agp_mode
|
||||
* is zero, it means that PCI memory (most likely through the use of
|
||||
* an IOMMU) is being used for "AGP" textures.
|
||||
*/
|
||||
/*@{ */
|
||||
unsigned long texture_handle; /**< Handle used to map AGP textures. */
|
||||
__u32 texture_size; /**< Size of the AGP texture region. */
|
||||
/*@} */
|
||||
|
||||
/**
|
||||
* Requested size of the primary DMA region.
|
||||
*
|
||||
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
|
||||
* filled in with the size of the primary DMA region actually allocated.
|
||||
*/
|
||||
__u32 primary_size;
|
||||
|
||||
/**
|
||||
* Requested number of secondary DMA buffers.
|
||||
*
|
||||
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
|
||||
* filled in with the actual number of secondary DMA buffers
|
||||
* allocated. Particularly when PCI DMA is used, this may be
|
||||
* (substantially) less than the number requested.
|
||||
*/
|
||||
__u32 secondary_bin_count;
|
||||
|
||||
/**
|
||||
* Requested size of each secondary DMA buffer.
|
||||
*
|
||||
* While the kernel \b is free to reduce
|
||||
* dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
|
||||
* to reduce dma_mga_dma_bootstrap::secondary_bin_size.
|
||||
*/
|
||||
__u32 secondary_bin_size;
|
||||
|
||||
/**
|
||||
* Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
|
||||
* \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
|
||||
* zero, it means that PCI DMA should be used, even if AGP is
|
||||
* possible.
|
||||
*
|
||||
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
|
||||
* filled in with the actual AGP mode. If AGP was not available
|
||||
* (i.e., PCI DMA was used), this value will be zero.
|
||||
*/
|
||||
__u32 agp_mode;
|
||||
|
||||
/**
|
||||
* Desired AGP GART size, measured in megabytes.
|
||||
*/
|
||||
__u8 agp_size;
|
||||
} drm_mga_dma_bootstrap_t;
|
||||
|
||||
typedef struct drm_mga_clear {
|
||||
unsigned int flags;
|
||||
unsigned int clear_color;
|
||||
unsigned int clear_depth;
|
||||
unsigned int color_mask;
|
||||
unsigned int depth_mask;
|
||||
} drm_mga_clear_t;
|
||||
|
||||
typedef struct drm_mga_vertex {
|
||||
int idx; /* buffer to queue */
|
||||
int used; /* bytes in use */
|
||||
int discard; /* client finished with buffer? */
|
||||
} drm_mga_vertex_t;
|
||||
|
||||
typedef struct drm_mga_indices {
|
||||
int idx; /* buffer to queue */
|
||||
unsigned int start;
|
||||
unsigned int end;
|
||||
int discard; /* client finished with buffer? */
|
||||
} drm_mga_indices_t;
|
||||
|
||||
typedef struct drm_mga_iload {
|
||||
int idx;
|
||||
unsigned int dstorg;
|
||||
unsigned int length;
|
||||
} drm_mga_iload_t;
|
||||
|
||||
typedef struct _drm_mga_blit {
|
||||
unsigned int planemask;
|
||||
unsigned int srcorg;
|
||||
unsigned int dstorg;
|
||||
int src_pitch, dst_pitch;
|
||||
int delta_sx, delta_sy;
|
||||
int delta_dx, delta_dy;
|
||||
int height, ydir; /* flip image vertically */
|
||||
int source_pitch, dest_pitch;
|
||||
} drm_mga_blit_t;
|
||||
|
||||
/* 3.1: An ioctl to get parameters that aren't available to the 3d
|
||||
* client any other way.
|
||||
*/
|
||||
#define MGA_PARAM_IRQ_NR 1
|
||||
|
||||
/* 3.2: Query the actual card type. The DDX only distinguishes between
|
||||
* G200 chips and non-G200 chips, which it calls G400. It turns out that
|
||||
* there are some very subtle differences between the G4x0 chips and the G550
|
||||
* chips. Using this parameter query, a client-side driver can detect the
|
||||
* difference between a G4x0 and a G550.
|
||||
*/
|
||||
#define MGA_PARAM_CARD_TYPE 2
|
||||
|
||||
typedef struct drm_mga_getparam {
|
||||
int param;
|
||||
void *value;
|
||||
} drm_mga_getparam_t;
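/*
 * Illustrative sketch, not part of the UAPI: querying the card type through
 * the getparam ioctl above.  MGA is a legacy (non-KMS) driver, so this
 * assumes an already opened and DRI-initialised /dev/dri/cardN descriptor;
 * the value pointer receives an int.
 */
#include <sys/ioctl.h>
#include "mga_drm.h"

static int mga_query_card_type(int fd, int *card_type)
{
        drm_mga_getparam_t gp = {
                .param = MGA_PARAM_CARD_TYPE,
                .value = card_type,     /* kernel writes an int here */
        };

        return ioctl(fd, DRM_IOCTL_MGA_GETPARAM, &gp);
}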
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
382
drm/msm_drm.h
Normal file
@ -0,0 +1,382 @@
|
||||
/*
|
||||
* Copyright (C) 2013 Red Hat
|
||||
* Author: Rob Clark <robdclark@gmail.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __MSM_DRM_H__
|
||||
#define __MSM_DRM_H__
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Please note that modifications to all structs defined here are
|
||||
* subject to backwards-compatibility constraints:
|
||||
* 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
|
||||
* user/kernel compatibility
|
||||
* 2) Keep fields aligned to their size
|
||||
* 3) Because of how drm_ioctl() works, we can add new fields at
|
||||
* the end of an ioctl if some care is taken: drm_ioctl() will
|
||||
* zero out the new fields at the tail of the ioctl, so a zero
|
||||
* value should have a backwards compatible meaning. And for
|
||||
* output params, userspace won't see the newly added output
|
||||
* fields.. so that has to be somehow ok.
|
||||
*/
|
||||
|
||||
#define MSM_PIPE_NONE 0x00
|
||||
#define MSM_PIPE_2D0 0x01
|
||||
#define MSM_PIPE_2D1 0x02
|
||||
#define MSM_PIPE_3D0 0x10
|
||||
|
||||
/* The pipe-id just uses the lower bits, so can be OR'd with flags in
|
||||
* the upper 16 bits (which could be extended further, if needed, maybe
|
||||
* we extend/overload the pipe-id some day to deal with multiple rings,
|
||||
* but even then I don't think we need the full lower 16 bits).
|
||||
*/
|
||||
#define MSM_PIPE_ID_MASK 0xffff
|
||||
#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK)
|
||||
#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK)
|
||||
|
||||
/* timeouts are specified in clock-monotonic absolute times (to simplify
|
||||
* restarting interrupted ioctls). The following struct is logically the
|
||||
* same as 'struct timespec' but 32/64b ABI safe.
|
||||
*/
|
||||
struct drm_msm_timespec {
|
||||
__s64 tv_sec; /* seconds */
|
||||
__s64 tv_nsec; /* nanoseconds */
|
||||
};
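/*
 * Illustrative sketch, not part of the UAPI: building the absolute,
 * clock-monotonic timeout described above from a relative timeout in
 * nanoseconds.  Overflow handling is omitted for brevity.
 */
#include <time.h>
#include "msm_drm.h"

static struct drm_msm_timespec msm_abs_timeout(__s64 rel_ns)
{
        struct timespec now;
        struct drm_msm_timespec to;

        clock_gettime(CLOCK_MONOTONIC, &now);
        to.tv_sec  = now.tv_sec  + rel_ns / 1000000000;
        to.tv_nsec = now.tv_nsec + rel_ns % 1000000000;
        if (to.tv_nsec >= 1000000000) {         /* carry into seconds */
                to.tv_sec++;
                to.tv_nsec -= 1000000000;
        }
        return to;
}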
|
||||
|
||||
/* Below "RO" indicates a read-only param, "WO" indicates write-only, and
|
||||
* "RW" indicates a param that can be both read (GET_PARAM) and written
|
||||
* (SET_PARAM)
|
||||
*/
|
||||
#define MSM_PARAM_GPU_ID 0x01 /* RO */
|
||||
#define MSM_PARAM_GMEM_SIZE 0x02 /* RO */
|
||||
#define MSM_PARAM_CHIP_ID 0x03 /* RO */
|
||||
#define MSM_PARAM_MAX_FREQ 0x04 /* RO */
|
||||
#define MSM_PARAM_TIMESTAMP 0x05 /* RO */
|
||||
#define MSM_PARAM_GMEM_BASE 0x06 /* RO */
|
||||
#define MSM_PARAM_PRIORITIES 0x07 /* RO: The # of priority levels */
|
||||
#define MSM_PARAM_PP_PGTABLE 0x08 /* RO: Deprecated, always returns zero */
|
||||
#define MSM_PARAM_FAULTS 0x09 /* RO */
|
||||
#define MSM_PARAM_SUSPENDS 0x0a /* RO */
|
||||
#define MSM_PARAM_SYSPROF 0x0b /* WO: 1 preserves perfcntrs, 2 also disables suspend */
|
||||
#define MSM_PARAM_COMM 0x0c /* WO: override for task->comm */
|
||||
#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
|
||||
#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
|
||||
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
|
||||
|
||||
/* For backwards compat. The original support for preemption was based on
|
||||
* a single ring per priority level so # of priority levels equals the #
|
||||
* of rings. With drm/scheduler providing additional levels of priority,
|
||||
* the number of priorities is greater than the # of rings. The param is
|
||||
* renamed to better reflect this.
|
||||
*/
|
||||
#define MSM_PARAM_NR_RINGS MSM_PARAM_PRIORITIES
|
||||
|
||||
struct drm_msm_param {
|
||||
__u32 pipe; /* in, MSM_PIPE_x */
|
||||
__u32 param; /* in, MSM_PARAM_x */
|
||||
__u64 value; /* out (get_param) or in (set_param) */
|
||||
__u32 len; /* zero for non-pointer params */
|
||||
__u32 pad; /* must be zero */
|
||||
};
|
||||
|
||||
/*
|
||||
* GEM buffers:
|
||||
*/
|
||||
|
||||
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
|
||||
#define MSM_BO_GPU_READONLY 0x00000002
|
||||
#define MSM_BO_CACHE_MASK 0x000f0000
|
||||
/* cache modes */
|
||||
#define MSM_BO_CACHED 0x00010000
|
||||
#define MSM_BO_WC 0x00020000
|
||||
#define MSM_BO_UNCACHED 0x00040000 /* deprecated, use MSM_BO_WC */
|
||||
#define MSM_BO_CACHED_COHERENT 0x080000
|
||||
|
||||
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
|
||||
MSM_BO_GPU_READONLY | \
|
||||
MSM_BO_CACHE_MASK)
|
||||
|
||||
struct drm_msm_gem_new {
|
||||
__u64 size; /* in */
|
||||
__u32 flags; /* in, mask of MSM_BO_x */
|
||||
__u32 handle; /* out */
|
||||
};
|
||||
|
||||
/* Get or set GEM buffer info. The requested value can be passed
|
||||
* directly in 'value', or for data larger than 64b 'value' is a
|
||||
* pointer to userspace buffer, with 'len' specifying the number of
|
||||
* bytes copied into that buffer. For info returned by pointer,
|
||||
* calling the GEM_INFO ioctl with null 'value' will return the
|
||||
* required buffer size in 'len'
|
||||
*/
|
||||
#define MSM_INFO_GET_OFFSET 0x00 /* get mmap() offset, returned by value */
|
||||
#define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */
|
||||
#define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */
|
||||
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
|
||||
#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
|
||||
|
||||
struct drm_msm_gem_info {
|
||||
__u32 handle; /* in */
|
||||
__u32 info; /* in - one of MSM_INFO_* */
|
||||
__u64 value; /* in or out */
|
||||
__u32 len; /* in or out */
|
||||
__u32 pad;
|
||||
};
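/*
 * Illustrative sketch, not part of the UAPI: the same ioctl handles both
 * by-value info (GET_IOVA) and by-pointer info (SET_NAME), as the comment
 * above describes.  The debug name is an arbitrary example string.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "msm_drm.h"

static int msm_name_bo_and_get_iova(int fd, __u32 handle, __u64 *iova)
{
        const char name[] = "example-bo";
        struct drm_msm_gem_info req = {
                .handle = handle,
                .info   = MSM_INFO_SET_NAME,
                .value  = (uintptr_t)name,      /* pointer param ... */
                .len    = sizeof(name) - 1,     /* ... with explicit length */
        };

        if (ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req))
                return -1;

        memset(&req, 0, sizeof(req));
        req.handle = handle;
        req.info   = MSM_INFO_GET_IOVA;         /* value param, len stays 0 */
        if (ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req))
                return -1;

        *iova = req.value;
        return 0;
}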
|
||||
|
||||
#define MSM_PREP_READ 0x01
|
||||
#define MSM_PREP_WRITE 0x02
|
||||
#define MSM_PREP_NOSYNC 0x04
|
||||
|
||||
#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
|
||||
|
||||
struct drm_msm_gem_cpu_prep {
|
||||
__u32 handle; /* in */
|
||||
__u32 op; /* in, mask of MSM_PREP_x */
|
||||
struct drm_msm_timespec timeout; /* in */
|
||||
};
|
||||
|
||||
struct drm_msm_gem_cpu_fini {
|
||||
__u32 handle; /* in */
|
||||
};
|
||||
|
||||
/*
|
||||
* Cmdstream Submission:
|
||||
*/
|
||||
|
||||
/* The value written into the cmdstream is logically:
|
||||
*
|
||||
* ((relocbuf->gpuaddr + reloc_offset) << shift) | or
|
||||
*
|
||||
* When we have GPU's w/ >32bit ptrs, it should be possible to deal
|
||||
* with this by emit'ing two reloc entries with appropriate shift
|
||||
* values. Or a new MSM_SUBMIT_CMD_x type would also be an option.
|
||||
*
|
||||
* NOTE that reloc's must be sorted by order of increasing submit_offset,
|
||||
* otherwise EINVAL.
|
||||
*/
|
||||
struct drm_msm_gem_submit_reloc {
|
||||
__u32 submit_offset; /* in, offset from submit_bo */
|
||||
__u32 or; /* in, value OR'd with result */
|
||||
__s32 shift; /* in, amount of left shift (can be negative) */
|
||||
__u32 reloc_idx; /* in, index of reloc_bo buffer */
|
||||
__u64 reloc_offset; /* in, offset from start of reloc_bo */
|
||||
};
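/*
 * Illustrative sketch, not part of the UAPI: the value the kernel patches
 * into the cmdstream for one reloc entry, written out as the formula in the
 * comment above.  A negative shift means a right shift, which is how the
 * upper half of a wider-than-32-bit address can be emitted.
 */
#include "msm_drm.h"

static __u64 msm_reloc_value(__u64 reloc_bo_gpuaddr,
                             const struct drm_msm_gem_submit_reloc *r)
{
        __u64 iova = reloc_bo_gpuaddr + r->reloc_offset;

        if (r->shift < 0)
                iova >>= -r->shift;
        else
                iova <<= r->shift;

        return iova | r->or;
}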
|
||||
|
||||
/* submit-types:
|
||||
* BUF - this cmd buffer is executed normally.
|
||||
* IB_TARGET_BUF - this cmd buffer is an IB target. Reloc's are
|
||||
* processed normally, but the kernel does not setup an IB to
|
||||
* this buffer in the first-level ringbuffer
|
||||
* CTX_RESTORE_BUF - only executed if there has been a GPU context
|
||||
* switch since the last SUBMIT ioctl
|
||||
*/
|
||||
#define MSM_SUBMIT_CMD_BUF 0x0001
|
||||
#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
|
||||
#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
|
||||
struct drm_msm_gem_submit_cmd {
|
||||
__u32 type; /* in, one of MSM_SUBMIT_CMD_x */
|
||||
__u32 submit_idx; /* in, index of submit_bo cmdstream buffer */
|
||||
__u32 submit_offset; /* in, offset into submit_bo */
|
||||
__u32 size; /* in, cmdstream size */
|
||||
__u32 pad;
|
||||
__u32 nr_relocs; /* in, number of submit_reloc's */
|
||||
__u64 relocs; /* in, ptr to array of submit_reloc's */
|
||||
};
|
||||
|
||||
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
|
||||
* cmdstream buffer(s) themselves or reloc entries) has one (and only
|
||||
* one) entry in the submit->bos[] table.
|
||||
*
|
||||
* As an optimization, the current buffer (gpu virtual address) can be
|
||||
* passed back through the 'presumed' field. If on a subsequent reloc,
|
||||
* userspace passes back a 'presumed' address that is still valid,
|
||||
* then patching the cmdstream for this entry is skipped. This can
|
||||
* avoid kernel needing to map/access the cmdstream bo in the common
|
||||
* case.
|
||||
*/
|
||||
#define MSM_SUBMIT_BO_READ 0x0001
|
||||
#define MSM_SUBMIT_BO_WRITE 0x0002
|
||||
#define MSM_SUBMIT_BO_DUMP 0x0004
|
||||
|
||||
#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | \
|
||||
MSM_SUBMIT_BO_WRITE | \
|
||||
MSM_SUBMIT_BO_DUMP)
|
||||
|
||||
struct drm_msm_gem_submit_bo {
|
||||
__u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
|
||||
__u32 handle; /* in, GEM handle */
|
||||
__u64 presumed; /* in/out, presumed buffer address */
|
||||
};
|
||||
|
||||
/* Valid submit ioctl flags: */
|
||||
#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
|
||||
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
|
||||
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
|
||||
#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
|
||||
#define MSM_SUBMIT_SYNCOBJ_IN 0x08000000 /* enable input syncobj */
|
||||
#define MSM_SUBMIT_SYNCOBJ_OUT 0x04000000 /* enable output syncobj */
|
||||
#define MSM_SUBMIT_FENCE_SN_IN 0x02000000 /* userspace passes in seqno fence */
|
||||
#define MSM_SUBMIT_FLAGS ( \
|
||||
MSM_SUBMIT_NO_IMPLICIT | \
|
||||
MSM_SUBMIT_FENCE_FD_IN | \
|
||||
MSM_SUBMIT_FENCE_FD_OUT | \
|
||||
MSM_SUBMIT_SUDO | \
|
||||
MSM_SUBMIT_SYNCOBJ_IN | \
|
||||
MSM_SUBMIT_SYNCOBJ_OUT | \
|
||||
MSM_SUBMIT_FENCE_SN_IN | \
|
||||
0)
|
||||
|
||||
#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
|
||||
#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \
|
||||
MSM_SUBMIT_SYNCOBJ_RESET | \
|
||||
0)
|
||||
|
||||
struct drm_msm_gem_submit_syncobj {
|
||||
__u32 handle; /* in, syncobj handle. */
|
||||
__u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
|
||||
__u64 point; /* in, timepoint for timeline syncobjs. */
|
||||
};
|
||||
|
||||
/* Each cmdstream submit consists of a table of buffers involved, and
|
||||
* one or more cmdstream buffers. This allows for conditional execution
|
||||
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
|
||||
*/
|
||||
struct drm_msm_gem_submit {
|
||||
__u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
|
||||
__u32 fence; /* out (or in with MSM_SUBMIT_FENCE_SN_IN flag) */
|
||||
__u32 nr_bos; /* in, number of submit_bo's */
|
||||
__u32 nr_cmds; /* in, number of submit_cmd's */
|
||||
__u64 bos; /* in, ptr to array of submit_bo's */
|
||||
__u64 cmds; /* in, ptr to array of submit_cmd's */
|
||||
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
|
||||
__u32 queueid; /* in, submitqueue id */
|
||||
__u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
|
||||
__u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
|
||||
__u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */
|
||||
__u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
|
||||
__u32 syncobj_stride; /* in, stride of syncobj arrays. */
|
||||
__u32 pad; /* in, reserved for future use, always 0. */
|
||||
|
||||
};
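/*
 * Illustrative sketch, not part of the UAPI: a minimal submit of one
 * cmdstream BO on the default submitqueue (id 0), with no relocs or
 * syncobjs.  The cmdstream size and handle are placeholders supplied by the
 * caller.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "msm_drm.h"

static int msm_submit_one(int fd, __u32 cmd_bo, __u32 cmd_size, __u32 *fence)
{
        struct drm_msm_gem_submit_bo bo = {
                .flags  = MSM_SUBMIT_BO_READ,
                .handle = cmd_bo,
        };
        struct drm_msm_gem_submit_cmd cmd = {
                .type       = MSM_SUBMIT_CMD_BUF,
                .submit_idx = 0,                /* index into the bos[] table */
                .size       = cmd_size,
        };
        struct drm_msm_gem_submit req;

        memset(&req, 0, sizeof(req));
        req.flags   = MSM_PIPE_3D0;             /* no MSM_SUBMIT_x flags set */
        req.queueid = 0;                        /* default submitqueue */
        req.nr_bos  = 1;
        req.bos     = (uintptr_t)&bo;
        req.nr_cmds = 1;
        req.cmds    = (uintptr_t)&cmd;

        if (ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req))
                return -1;

        *fence = req.fence;
        return 0;
}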
|
||||
|
||||
/* The normal way to synchronize with the GPU is just to CPU_PREP on
|
||||
* a buffer if you need to access it from the CPU (other cmdstream
|
||||
* submission from same or other contexts, PAGE_FLIP ioctl, etc, all
|
||||
* handle the required synchronization under the hood). This ioctl
|
||||
* mainly just exists as a way to implement the gallium pipe_fence
|
||||
* APIs without requiring a dummy bo to synchronize on.
|
||||
*/
|
||||
struct drm_msm_wait_fence {
|
||||
__u32 fence; /* in */
|
||||
__u32 pad;
|
||||
struct drm_msm_timespec timeout; /* in */
|
||||
__u32 queueid; /* in, submitqueue id */
|
||||
};
|
||||
|
||||
/* madvise provides a way to tell the kernel in case a buffer's contents
|
||||
* can be discarded under memory pressure, which is useful for userspace
|
||||
* bo cache where we want to optimistically hold on to the buffer allocation
|
||||
* and potential mmap, but allow the pages to be discarded under memory
|
||||
* pressure.
|
||||
*
|
||||
* Typical usage would involve madvise(DONTNEED) when buffer enters BO
|
||||
* cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
|
||||
* In the WILLNEED case, 'retained' indicates to userspace whether the
|
||||
* backing pages still exist.
|
||||
*/
|
||||
#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
|
||||
#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
|
||||
#define __MSM_MADV_PURGED 2 /* internal state */
|
||||
|
||||
struct drm_msm_gem_madvise {
|
||||
__u32 handle; /* in, GEM handle */
|
||||
__u32 madv; /* in, MSM_MADV_x */
|
||||
__u32 retained; /* out, whether backing store still exists */
|
||||
};
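/*
 * Illustrative sketch, not part of the UAPI: the WILLNEED half of the BO
 * cache pattern described in the comment above.  Returns 1 if the backing
 * pages survived, 0 if they were purged and the buffer must be reinitialised,
 * -1 on error.
 */
#include <sys/ioctl.h>
#include "msm_drm.h"

static int msm_bo_cache_reuse(int fd, __u32 handle)
{
        struct drm_msm_gem_madvise req = {
                .handle = handle,
                .madv   = MSM_MADV_WILLNEED,
        };

        if (ioctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req))
                return -1;

        return req.retained ? 1 : 0;    /* 0: backing pages were discarded */
}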
|
||||
|
||||
/*
|
||||
* Draw queues allow the user to set specific submission parameters. Command
|
||||
* submissions specify a specific submitqueue to use. ID 0 is reserved for
|
||||
* backwards compatibility as a "default" submitqueue
|
||||
*/
|
||||
|
||||
#define MSM_SUBMITQUEUE_FLAGS (0)
|
||||
|
||||
/*
|
||||
* The submitqueue priority should be between 0 and MSM_PARAM_PRIORITIES-1,
|
||||
* a lower numeric value is higher priority.
|
||||
*/
|
||||
struct drm_msm_submitqueue {
|
||||
__u32 flags; /* in, MSM_SUBMITQUEUE_x */
|
||||
__u32 prio; /* in, Priority level */
|
||||
__u32 id; /* out, identifier */
|
||||
};
|
||||
|
||||
#define MSM_SUBMITQUEUE_PARAM_FAULTS 0
|
||||
|
||||
struct drm_msm_submitqueue_query {
|
||||
__u64 data;
|
||||
__u32 id;
|
||||
__u32 param;
|
||||
__u32 len;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
#define DRM_MSM_GET_PARAM 0x00
|
||||
#define DRM_MSM_SET_PARAM 0x01
|
||||
#define DRM_MSM_GEM_NEW 0x02
|
||||
#define DRM_MSM_GEM_INFO 0x03
|
||||
#define DRM_MSM_GEM_CPU_PREP 0x04
|
||||
#define DRM_MSM_GEM_CPU_FINI 0x05
|
||||
#define DRM_MSM_GEM_SUBMIT 0x06
|
||||
#define DRM_MSM_WAIT_FENCE 0x07
|
||||
#define DRM_MSM_GEM_MADVISE 0x08
|
||||
/* placeholder:
|
||||
#define DRM_MSM_GEM_SVM_NEW 0x09
|
||||
*/
|
||||
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
|
||||
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
|
||||
#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
|
||||
|
||||
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
|
||||
#define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param)
|
||||
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
|
||||
#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
|
||||
#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
|
||||
#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
|
||||
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
|
||||
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
|
||||
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
|
||||
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
|
||||
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
|
||||
#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __MSM_DRM_H__ */
|
204
drm/nouveau_drm.h
Normal file
@ -0,0 +1,204 @@
|
||||
/*
|
||||
* Copyright 2005 Stephane Marchesin.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NOUVEAU_DRM_H__
|
||||
#define __NOUVEAU_DRM_H__
|
||||
|
||||
#define DRM_NOUVEAU_EVENT_NVIF 0x80000000
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
|
||||
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
|
||||
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
|
||||
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
|
||||
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
|
||||
|
||||
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
|
||||
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
|
||||
#define NOUVEAU_GEM_TILE_16BPP 0x00000001
|
||||
#define NOUVEAU_GEM_TILE_32BPP 0x00000002
|
||||
#define NOUVEAU_GEM_TILE_ZETA 0x00000004
|
||||
#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
|
||||
|
||||
struct drm_nouveau_gem_info {
|
||||
__u32 handle;
|
||||
__u32 domain;
|
||||
__u64 size;
|
||||
__u64 offset;
|
||||
__u64 map_handle;
|
||||
__u32 tile_mode;
|
||||
__u32 tile_flags;
|
||||
};
|
||||
|
||||
struct drm_nouveau_gem_new {
|
||||
struct drm_nouveau_gem_info info;
|
||||
__u32 channel_hint;
|
||||
__u32 align;
|
||||
};
|
||||
|
||||
#define NOUVEAU_GEM_MAX_BUFFERS 1024
|
||||
struct drm_nouveau_gem_pushbuf_bo_presumed {
|
||||
__u32 valid;
|
||||
__u32 domain;
|
||||
__u64 offset;
|
||||
};
|
||||
|
||||
struct drm_nouveau_gem_pushbuf_bo {
|
||||
__u64 user_priv;
|
||||
__u32 handle;
|
||||
__u32 read_domains;
|
||||
__u32 write_domains;
|
||||
__u32 valid_domains;
|
||||
struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
|
||||
};
|
||||
|
||||
#define NOUVEAU_GEM_RELOC_LOW (1 << 0)
|
||||
#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
|
||||
#define NOUVEAU_GEM_RELOC_OR (1 << 2)
|
||||
#define NOUVEAU_GEM_MAX_RELOCS 1024
|
||||
struct drm_nouveau_gem_pushbuf_reloc {
|
||||
__u32 reloc_bo_index;
|
||||
__u32 reloc_bo_offset;
|
||||
__u32 bo_index;
|
||||
__u32 flags;
|
||||
__u32 data;
|
||||
__u32 vor;
|
||||
__u32 tor;
|
||||
};
|
||||
|
||||
#define NOUVEAU_GEM_MAX_PUSH 512
|
||||
struct drm_nouveau_gem_pushbuf_push {
|
||||
__u32 bo_index;
|
||||
__u32 pad;
|
||||
__u64 offset;
|
||||
__u64 length;
|
||||
};
|
||||
|
||||
struct drm_nouveau_gem_pushbuf {
|
||||
__u32 channel;
|
||||
__u32 nr_buffers;
|
||||
__u64 buffers;
|
||||
__u32 nr_relocs;
|
||||
__u32 nr_push;
|
||||
__u64 relocs;
|
||||
__u64 push;
|
||||
__u32 suffix0;
|
||||
__u32 suffix1;
|
||||
#define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
|
||||
__u64 vram_available;
|
||||
__u64 gart_available;
|
||||
};
|
||||
|
||||
#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
|
||||
#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
|
||||
struct drm_nouveau_gem_cpu_prep {
|
||||
__u32 handle;
|
||||
__u32 flags;
|
||||
};
|
||||
|
||||
struct drm_nouveau_gem_cpu_fini {
|
||||
__u32 handle;
|
||||
};
|
||||
|
||||
#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
|
||||
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
|
||||
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
|
||||
#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
|
||||
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
|
||||
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
|
||||
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
|
||||
#define DRM_NOUVEAU_NVIF 0x07
|
||||
#define DRM_NOUVEAU_SVM_INIT 0x08
|
||||
#define DRM_NOUVEAU_SVM_BIND 0x09
|
||||
#define DRM_NOUVEAU_GEM_NEW 0x40
|
||||
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
|
||||
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
|
||||
#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
|
||||
#define DRM_NOUVEAU_GEM_INFO 0x44
|
||||
|
||||
struct drm_nouveau_svm_init {
|
||||
__u64 unmanaged_addr;
|
||||
__u64 unmanaged_size;
|
||||
};
|
||||
|
||||
struct drm_nouveau_svm_bind {
|
||||
__u64 header;
|
||||
__u64 va_start;
|
||||
__u64 va_end;
|
||||
__u64 npages;
|
||||
__u64 stride;
|
||||
__u64 result;
|
||||
__u64 reserved0;
|
||||
__u64 reserved1;
|
||||
};
|
||||
|
||||
#define NOUVEAU_SVM_BIND_COMMAND_SHIFT 0
|
||||
#define NOUVEAU_SVM_BIND_COMMAND_BITS 8
|
||||
#define NOUVEAU_SVM_BIND_COMMAND_MASK ((1 << 8) - 1)
|
||||
#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT 8
|
||||
#define NOUVEAU_SVM_BIND_PRIORITY_BITS 8
|
||||
#define NOUVEAU_SVM_BIND_PRIORITY_MASK ((1 << 8) - 1)
|
||||
#define NOUVEAU_SVM_BIND_TARGET_SHIFT 16
|
||||
#define NOUVEAU_SVM_BIND_TARGET_BITS 32
|
||||
#define NOUVEAU_SVM_BIND_TARGET_MASK 0xffffffff
|
||||
|
||||
/*
|
||||
* Below is used to validate ioctl arguments; userspace can also use it to make
|
||||
* sure that no bits are set beyond the known fields for a given kernel version.
|
||||
*/
|
||||
#define NOUVEAU_SVM_BIND_VALID_BITS 48
|
||||
#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
|
||||
|
||||
|
||||
/*
|
||||
* NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronously migrate to the target memory.
|
||||
* result: number of pages successfully migrated to the target memory.
|
||||
*/
|
||||
#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0
|
||||
|
||||
/*
|
||||
* NOUVEAU_SVM_BIND_TARGET__GPU_VRAM: target the GPU VRAM memory.
|
||||
*/
|
||||
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
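/*
 * Illustrative sketch, not part of the UAPI: packing the drm_nouveau_svm_bind
 * header for a synchronous migrate-to-VRAM request using the shift/mask
 * defines above.  Priority 0 is an arbitrary example; the target value is
 * widened to 64 bits before shifting so that bit 31 of the target lands in
 * the upper half of the header.
 */
#include "nouveau_drm.h"

static __u64 nouveau_svm_bind_header_migrate_vram(void)
{
        __u64 header = 0;

        header |= (__u64)NOUVEAU_SVM_BIND_COMMAND__MIGRATE
                        << NOUVEAU_SVM_BIND_COMMAND_SHIFT;
        header |= (__u64)0 << NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
        header |= (__u64)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
                        << NOUVEAU_SVM_BIND_TARGET_SHIFT;

        return header & NOUVEAU_SVM_BIND_VALID_MASK;
}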
|
||||
|
||||
|
||||
#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
|
||||
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
|
||||
|
||||
#define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
|
||||
#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
|
||||
#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
|
||||
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
|
||||
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __NOUVEAU_DRM_H__ */
|
126
drm/omap_drm.h
Normal file
@ -0,0 +1,126 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
/*
|
||||
* include/uapi/drm/omap_drm.h
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __OMAP_DRM_H__
|
||||
#define __OMAP_DRM_H__
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Please note that modifications to all structs defined here are
|
||||
* subject to backwards-compatibility constraints.
|
||||
*/
|
||||
|
||||
#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
|
||||
|
||||
struct drm_omap_param {
|
||||
__u64 param; /* in */
|
||||
__u64 value; /* in (set_param), out (get_param) */
|
||||
};
|
||||
|
||||
/* Scanout buffer, consumable by DSS */
|
||||
#define OMAP_BO_SCANOUT 0x00000001
|
||||
|
||||
/* Buffer CPU caching mode: cached, write-combining or uncached. */
|
||||
#define OMAP_BO_CACHED 0x00000000
|
||||
#define OMAP_BO_WC 0x00000002
|
||||
#define OMAP_BO_UNCACHED 0x00000004
|
||||
#define OMAP_BO_CACHE_MASK 0x00000006
|
||||
|
||||
/* Use TILER for the buffer. The TILER container unit can be 8, 16 or 32 bits. */
|
||||
#define OMAP_BO_TILED_8 0x00000100
|
||||
#define OMAP_BO_TILED_16 0x00000200
|
||||
#define OMAP_BO_TILED_32 0x00000300
|
||||
#define OMAP_BO_TILED_MASK 0x00000f00
|
||||
|
||||
union omap_gem_size {
|
||||
__u32 bytes; /* (for non-tiled formats) */
|
||||
struct {
|
||||
__u16 width;
|
||||
__u16 height;
|
||||
} tiled; /* (for tiled formats) */
|
||||
};
|
||||
|
||||
struct drm_omap_gem_new {
|
||||
union omap_gem_size size; /* in */
|
||||
__u32 flags; /* in */
|
||||
__u32 handle; /* out */
|
||||
__u32 __pad;
|
||||
};
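/*
 * Illustrative sketch, not part of the UAPI: allocating a TILER-backed,
 * write-combined 16-bpp scanout buffer with the union and struct above.  The
 * 1920x1080 dimensions are example values; for tiled buffers the size is
 * given as width/height rather than as a byte count.
 */
#include <string.h>
#include <sys/ioctl.h>
#include "omap_drm.h"

static int omap_new_tiled_fb(int fd, __u32 *handle)
{
        struct drm_omap_gem_new req;

        memset(&req, 0, sizeof(req));
        req.size.tiled.width  = 1920;
        req.size.tiled.height = 1080;
        req.flags = OMAP_BO_TILED_16 | OMAP_BO_WC | OMAP_BO_SCANOUT;

        if (ioctl(fd, DRM_IOCTL_OMAP_GEM_NEW, &req))
                return -1;

        *handle = req.handle;
        return 0;
}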
|
||||
|
||||
/* mask of operations: */
|
||||
enum omap_gem_op {
|
||||
OMAP_GEM_READ = 0x01,
|
||||
OMAP_GEM_WRITE = 0x02,
|
||||
};
|
||||
|
||||
struct drm_omap_gem_cpu_prep {
|
||||
__u32 handle; /* buffer handle (in) */
|
||||
__u32 op; /* mask of omap_gem_op (in) */
|
||||
};
|
||||
|
||||
struct drm_omap_gem_cpu_fini {
|
||||
__u32 handle; /* buffer handle (in) */
|
||||
__u32 op; /* mask of omap_gem_op (in) */
|
||||
/* TODO maybe here we pass down info about what regions are touched
|
||||
* by sw so we can be clever about cache ops? For now a placeholder,
|
||||
* set to zero and we just do full buffer flush..
|
||||
*/
|
||||
__u32 nregions;
|
||||
__u32 __pad;
|
||||
};
|
||||
|
||||
struct drm_omap_gem_info {
|
||||
__u32 handle; /* buffer handle (in) */
|
||||
__u32 pad;
|
||||
__u64 offset; /* mmap offset (out) */
|
||||
/* note: in case of tiled buffers, the user virtual size can be
|
||||
* different from the physical size (ie. how many pages are needed
|
||||
* to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
|
||||
* This size here is the one that should be used if you want to
|
||||
* mmap() the buffer:
|
||||
*/
|
||||
__u32 size; /* virtual size for mmap'ing (out) */
|
||||
__u32 __pad;
|
||||
};
|
||||
|
||||
#define DRM_OMAP_GET_PARAM 0x00
|
||||
#define DRM_OMAP_SET_PARAM 0x01
|
||||
#define DRM_OMAP_GEM_NEW 0x03
|
||||
#define DRM_OMAP_GEM_CPU_PREP 0x04 /* Deprecated, to be removed */
|
||||
#define DRM_OMAP_GEM_CPU_FINI 0x05 /* Deprecated, to be removed */
|
||||
#define DRM_OMAP_GEM_INFO 0x06
|
||||
#define DRM_OMAP_NUM_IOCTLS 0x07
|
||||
|
||||
#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
|
||||
#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
|
||||
#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
|
||||
#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
|
||||
#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
|
||||
#define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __OMAP_DRM_H__ */
|
282
drm/panfrost_drm.h
Normal file
@ -0,0 +1,282 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2014-2018 Broadcom
|
||||
* Copyright © 2019 Collabora ltd.
|
||||
*/
|
||||
#ifndef _PANFROST_DRM_H_
|
||||
#define _PANFROST_DRM_H_
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define DRM_PANFROST_SUBMIT 0x00
|
||||
#define DRM_PANFROST_WAIT_BO 0x01
|
||||
#define DRM_PANFROST_CREATE_BO 0x02
|
||||
#define DRM_PANFROST_MMAP_BO 0x03
|
||||
#define DRM_PANFROST_GET_PARAM 0x04
|
||||
#define DRM_PANFROST_GET_BO_OFFSET 0x05
|
||||
#define DRM_PANFROST_PERFCNT_ENABLE 0x06
|
||||
#define DRM_PANFROST_PERFCNT_DUMP 0x07
|
||||
#define DRM_PANFROST_MADVISE 0x08
|
||||
|
||||
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
|
||||
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
|
||||
#define DRM_IOCTL_PANFROST_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_CREATE_BO, struct drm_panfrost_create_bo)
|
||||
#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
|
||||
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
|
||||
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
|
||||
#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
|
||||
|
||||
/*
|
||||
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
|
||||
* param is set to true.
|
||||
* All these ioctl(s) are subject to deprecation, so please don't rely on
|
||||
* them for anything but debugging purposes.
|
||||
*/
|
||||
#define DRM_IOCTL_PANFROST_PERFCNT_ENABLE DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_ENABLE, struct drm_panfrost_perfcnt_enable)
|
||||
#define DRM_IOCTL_PANFROST_PERFCNT_DUMP DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_DUMP, struct drm_panfrost_perfcnt_dump)
|
||||
|
||||
#define PANFROST_JD_REQ_FS (1 << 0)
|
||||
/**
|
||||
* struct drm_panfrost_submit - ioctl argument for submitting commands to the 3D
|
||||
* engine.
|
||||
*
|
||||
* This asks the kernel to have the GPU execute a render command list.
|
||||
*/
|
||||
struct drm_panfrost_submit {
|
||||
|
||||
/** Address to GPU mapping of job descriptor */
|
||||
__u64 jc;
|
||||
|
||||
/** An optional array of sync objects to wait on before starting this job. */
|
||||
__u64 in_syncs;
|
||||
|
||||
/** Number of sync objects to wait on before starting this job. */
|
||||
__u32 in_sync_count;
|
||||
|
||||
/** An optional sync object to place the completion fence in. */
|
||||
__u32 out_sync;
|
||||
|
||||
/** Pointer to a u32 array of the BOs that are referenced by the job. */
|
||||
__u64 bo_handles;
|
||||
|
||||
/** Number of BO handles passed in (size is that times 4). */
|
||||
__u32 bo_handle_count;
|
||||
|
||||
/** A combination of PANFROST_JD_REQ_* */
|
||||
__u32 requirements;
|
||||
};
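/*
 * Illustrative sketch, not part of the UAPI: submitting one job chain with a
 * single referenced BO and a syncobj to signal on completion.  The job
 * descriptor GPU address and the handles are placeholders; building the job
 * descriptor itself is hardware-specific and out of scope here.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "panfrost_drm.h"

static int panfrost_submit_job(int fd, __u64 jc_gpu_va, __u32 bo_handle,
                               __u32 out_sync)
{
        __u32 bos[1] = { bo_handle };
        struct drm_panfrost_submit req;

        memset(&req, 0, sizeof(req));
        req.jc              = jc_gpu_va;        /* GPU address of the job descriptor */
        req.bo_handles      = (uintptr_t)bos;
        req.bo_handle_count = 1;
        req.out_sync        = out_sync;
        req.requirements    = 0;        /* or PANFROST_JD_REQ_FS for fragment jobs */

        return ioctl(fd, DRM_IOCTL_PANFROST_SUBMIT, &req);
}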
|
||||
|
||||
/**
|
||||
* struct drm_panfrost_wait_bo - ioctl argument for waiting for
|
||||
* completion of the last DRM_PANFROST_SUBMIT on a BO.
|
||||
*
|
||||
* This is useful for cases where multiple processes might be
|
||||
* rendering to a BO and you want to wait for all rendering to be
|
||||
* completed.
|
||||
*/
|
||||
struct drm_panfrost_wait_bo {
|
||||
__u32 handle;
|
||||
__u32 pad;
|
||||
__s64 timeout_ns; /* absolute */
|
||||
};
|
||||
|
||||
/* Valid flags to pass to drm_panfrost_create_bo */
|
||||
#define PANFROST_BO_NOEXEC 1
|
||||
#define PANFROST_BO_HEAP 2
|
||||
|
||||
/**
|
||||
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
|
||||
*
|
||||
* The flags argument is a bit mask of PANFROST_BO_* flags.
|
||||
*/
|
||||
struct drm_panfrost_create_bo {
|
||||
__u32 size;
|
||||
__u32 flags;
|
||||
/** Returned GEM handle for the BO. */
|
||||
__u32 handle;
|
||||
/* Pad, must be zero-filled. */
|
||||
__u32 pad;
|
||||
/**
|
||||
* Returned offset for the BO in the GPU address space. This offset
|
||||
* is private to the DRM fd and is valid for the lifetime of the GEM
|
||||
* handle.
|
||||
*
|
||||
* This offset value will always be nonzero, since various HW
|
||||
* units treat 0 specially.
|
||||
*/
|
||||
__u64 offset;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct drm_panfrost_mmap_bo - ioctl argument for mapping Panfrost BOs.
|
||||
*
|
||||
* This doesn't actually perform an mmap. Instead, it returns the
|
||||
* offset you need to use in an mmap on the DRM device node. This
|
||||
* means that tools like valgrind end up knowing about the mapped
|
||||
* memory.
|
||||
*
|
||||
* There are currently no values for the flags argument, but it may be
|
||||
* used in a future extension.
|
||||
*/
|
||||
struct drm_panfrost_mmap_bo {
|
||||
/** Handle for the object being mapped. */
|
||||
__u32 handle;
|
||||
__u32 flags;
|
||||
/** offset into the drm node to use for subsequent mmap call. */
|
||||
__u64 offset;
|
||||
};
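/*
 * Illustrative sketch, not part of the UAPI: the two-step mapping flow the
 * comment above describes: create a BO, ask for its fake mmap offset, then
 * mmap that offset on the DRM fd.  Size and protection flags are example
 * values chosen by the caller.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "panfrost_drm.h"

static void *panfrost_create_and_map(int fd, __u32 size, __u32 *handle)
{
        struct drm_panfrost_create_bo create = { .size = size, .flags = 0 };
        struct drm_panfrost_mmap_bo map = { 0 };

        if (ioctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create))
                return NULL;

        map.handle = create.handle;
        if (ioctl(fd, DRM_IOCTL_PANFROST_MMAP_BO, &map))
                return NULL;

        *handle = create.handle;
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, map.offset);
}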
|
||||
|
||||
enum drm_panfrost_param {
|
||||
DRM_PANFROST_PARAM_GPU_PROD_ID,
|
||||
DRM_PANFROST_PARAM_GPU_REVISION,
|
||||
DRM_PANFROST_PARAM_SHADER_PRESENT,
|
||||
DRM_PANFROST_PARAM_TILER_PRESENT,
|
||||
DRM_PANFROST_PARAM_L2_PRESENT,
|
||||
DRM_PANFROST_PARAM_STACK_PRESENT,
|
||||
DRM_PANFROST_PARAM_AS_PRESENT,
|
||||
DRM_PANFROST_PARAM_JS_PRESENT,
|
||||
DRM_PANFROST_PARAM_L2_FEATURES,
|
||||
DRM_PANFROST_PARAM_CORE_FEATURES,
|
||||
DRM_PANFROST_PARAM_TILER_FEATURES,
|
||||
DRM_PANFROST_PARAM_MEM_FEATURES,
|
||||
DRM_PANFROST_PARAM_MMU_FEATURES,
|
||||
DRM_PANFROST_PARAM_THREAD_FEATURES,
|
||||
DRM_PANFROST_PARAM_MAX_THREADS,
|
||||
DRM_PANFROST_PARAM_THREAD_MAX_WORKGROUP_SZ,
|
||||
DRM_PANFROST_PARAM_THREAD_MAX_BARRIER_SZ,
|
||||
DRM_PANFROST_PARAM_COHERENCY_FEATURES,
|
||||
DRM_PANFROST_PARAM_TEXTURE_FEATURES0,
|
||||
DRM_PANFROST_PARAM_TEXTURE_FEATURES1,
|
||||
DRM_PANFROST_PARAM_TEXTURE_FEATURES2,
|
||||
DRM_PANFROST_PARAM_TEXTURE_FEATURES3,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES0,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES1,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES2,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES3,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES4,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES5,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES6,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES7,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES8,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES9,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES10,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES11,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES12,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES13,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES14,
|
||||
DRM_PANFROST_PARAM_JS_FEATURES15,
|
||||
DRM_PANFROST_PARAM_NR_CORE_GROUPS,
|
||||
DRM_PANFROST_PARAM_THREAD_TLS_ALLOC,
|
||||
DRM_PANFROST_PARAM_AFBC_FEATURES,
|
||||
};
|
||||
|
||||
struct drm_panfrost_get_param {
|
||||
__u32 param;
|
||||
__u32 pad;
|
||||
__u64 value;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the offset for the BO in the GPU address space for this DRM fd.
|
||||
* This is the same value returned by drm_panfrost_create_bo, if that was called
|
||||
* from this DRM fd.
|
||||
*/
|
||||
struct drm_panfrost_get_bo_offset {
|
||||
__u32 handle;
|
||||
__u32 pad;
|
||||
__u64 offset;
|
||||
};
|
||||
|
||||
struct drm_panfrost_perfcnt_enable {
|
||||
__u32 enable;
|
||||
/*
|
||||
* On bifrost we have 2 sets of counters, this parameter defines the
|
||||
* one to track.
|
||||
*/
|
||||
__u32 counterset;
|
||||
};
|
||||
|
||||
struct drm_panfrost_perfcnt_dump {
|
||||
__u64 buf_ptr;
|
||||
};
|
||||
|
||||
/* madvise provides a way to tell the kernel in case a buffer's contents
|
||||
* can be discarded under memory pressure, which is useful for userspace
|
||||
* bo cache where we want to optimistically hold on to the buffer allocation
|
||||
* and potential mmap, but allow the pages to be discarded under memory
|
||||
* pressure.
|
||||
*
|
||||
* Typical usage would involve madvise(DONTNEED) when buffer enters BO
|
||||
* cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
|
||||
* In the WILLNEED case, 'retained' indicates to userspace whether the
|
||||
* backing pages still exist.
|
||||
*/
|
||||
#define PANFROST_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
|
||||
#define PANFROST_MADV_DONTNEED 1 /* backing pages not needed */
|
||||
|
||||
struct drm_panfrost_madvise {
|
||||
__u32 handle; /* in, GEM handle */
|
||||
__u32 madv; /* in, PANFROST_MADV_x */
|
||||
__u32 retained; /* out, whether backing store still exists */
|
||||
};
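/*
 * Illustrative sketch, not part of the kernel header: the typical BO-cache
 * use of madvise described above. Assumes this header is included and uses
 * DRM_IOCTL_PANFROST_MADVISE as defined earlier in this file.
 */
#include <stdbool.h>
#include <sys/ioctl.h>

/* Called when a buffer is parked in the userspace BO cache. */
static void bo_cache_put(int drm_fd, __u32 handle)
{
        struct drm_panfrost_madvise m = {
                .handle = handle,
                .madv = PANFROST_MADV_DONTNEED,
        };

        ioctl(drm_fd, DRM_IOCTL_PANFROST_MADVISE, &m);
}

/* Called when recycling a cached buffer; returns false if the kernel has
 * already purged the backing pages and the BO must be recreated.
 */
static bool bo_cache_get(int drm_fd, __u32 handle)
{
        struct drm_panfrost_madvise m = {
                .handle = handle,
                .madv = PANFROST_MADV_WILLNEED,
        };

        if (ioctl(drm_fd, DRM_IOCTL_PANFROST_MADVISE, &m) != 0)
                return false;
        return m.retained != 0;
}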
|
||||
|
||||
/* Definitions for coredump decoding in user space */
|
||||
#define PANFROSTDUMP_MAJOR 1
|
||||
#define PANFROSTDUMP_MINOR 0
|
||||
|
||||
#define PANFROSTDUMP_MAGIC 0x464E4150 /* PANF */
|
||||
|
||||
#define PANFROSTDUMP_BUF_REG 0
|
||||
#define PANFROSTDUMP_BUF_BOMAP (PANFROSTDUMP_BUF_REG + 1)
|
||||
#define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1)
|
||||
#define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1)
|
||||
|
||||
/*
|
||||
* This structure is stored in the native endianness of the dumping machine; tools can
|
||||
* detect the endianness by looking at the value in 'magic'.
|
||||
*/
|
||||
struct panfrost_dump_object_header {
|
||||
__u32 magic;
|
||||
__u32 type;
|
||||
__u32 file_size;
|
||||
__u32 file_offset;
|
||||
|
||||
union {
|
||||
struct {
|
||||
__u64 jc;
|
||||
__u32 gpu_id;
|
||||
__u32 major;
|
||||
__u32 minor;
|
||||
__u64 nbos;
|
||||
} reghdr;
|
||||
|
||||
struct {
|
||||
__u32 valid;
|
||||
__u64 iova;
|
||||
__u32 data[2];
|
||||
} bomap;
|
||||
|
||||
/*
|
||||
* Force same size in case we want to expand the header
|
||||
* with new fields and also keep it 512-byte aligned
|
||||
*/
|
||||
|
||||
__u32 sizer[496];
|
||||
};
|
||||
};
|
||||
|
||||
/* Registers object, an array of these */
|
||||
struct panfrost_dump_registers {
|
||||
__u32 reg;
|
||||
__u32 value;
|
||||
};
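/*
 * Illustrative sketch, not part of the kernel header: how a coredump
 * decoder can use 'magic' to detect whether a dump was written on a
 * machine of the opposite endianness. Assumes this header is included;
 * byte swapping of the remaining fields is left to the caller.
 */
static int panfrost_dump_needs_byteswap(const struct panfrost_dump_object_header *hdr,
                                        int *needs_swap)
{
        if (hdr->magic == PANFROSTDUMP_MAGIC) {
                *needs_swap = 0;        /* same endianness as the dumper */
                return 0;
        }
        if (hdr->magic == __builtin_bswap32(PANFROSTDUMP_MAGIC)) {
                *needs_swap = 1;        /* opposite endianness */
                return 0;
        }
        return -1;                      /* not a Panfrost dump */
}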
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _PANFROST_DRM_H_ */
|
158
drm/qxl_drm.h
Normal file
@ -0,0 +1,158 @@
|
||||
/*
|
||||
* Copyright 2013 Red Hat
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#ifndef QXL_DRM_H
|
||||
#define QXL_DRM_H
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Please note that modifications to all structs defined here are
|
||||
* subject to backwards-compatibility constraints.
|
||||
*
|
||||
* Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
|
||||
* compatibility. Keep fields aligned to their size.
|
||||
*/
|
||||
|
||||
#define QXL_GEM_DOMAIN_CPU 0
|
||||
#define QXL_GEM_DOMAIN_VRAM 1
|
||||
#define QXL_GEM_DOMAIN_SURFACE 2
|
||||
|
||||
#define DRM_QXL_ALLOC 0x00
|
||||
#define DRM_QXL_MAP 0x01
|
||||
#define DRM_QXL_EXECBUFFER 0x02
|
||||
#define DRM_QXL_UPDATE_AREA 0x03
|
||||
#define DRM_QXL_GETPARAM 0x04
|
||||
#define DRM_QXL_CLIENTCAP 0x05
|
||||
|
||||
#define DRM_QXL_ALLOC_SURF 0x06
|
||||
|
||||
struct drm_qxl_alloc {
|
||||
__u32 size;
|
||||
__u32 handle; /* 0 is an invalid handle */
|
||||
};
|
||||
|
||||
struct drm_qxl_map {
|
||||
__u64 offset; /* use for mmap system call */
|
||||
__u32 handle;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
/*
|
||||
* dest is the bo we are writing the relocation into
|
||||
* src is the bo we are relocating.
|
||||
* *(dest_handle.base_addr + dest_offset) = physical_address(src_handle.addr +
|
||||
* src_offset)
|
||||
*/
|
||||
#define QXL_RELOC_TYPE_BO 1
|
||||
#define QXL_RELOC_TYPE_SURF 2
|
||||
|
||||
struct drm_qxl_reloc {
|
||||
__u64 src_offset; /* offset into src_handle or src buffer */
|
||||
__u64 dst_offset; /* offset in dest handle */
|
||||
__u32 src_handle; /* dest handle to compute address from */
|
||||
__u32 dst_handle; /* 0 if to command buffer */
|
||||
__u32 reloc_type;
|
||||
__u32 pad;
|
||||
};
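/*
 * Illustrative sketch, not part of the kernel header: fill one relocation
 * entry so the dword at dst_offset in the command buffer gets patched with
 * the device address of src_handle + src_offset, per the formula above.
 * The handle is assumed to come from a prior DRM_QXL_ALLOC call.
 */
static void qxl_fill_bo_reloc(struct drm_qxl_reloc *reloc,
                              __u32 src_handle, __u64 src_offset,
                              __u64 dst_offset)
{
        reloc->reloc_type = QXL_RELOC_TYPE_BO;
        reloc->src_handle = src_handle;
        reloc->src_offset = src_offset;
        reloc->dst_handle = 0;          /* 0 = patch the command buffer itself */
        reloc->dst_offset = dst_offset;
        reloc->pad = 0;
}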
|
||||
|
||||
struct drm_qxl_command {
|
||||
__u64 command; /* void* */
|
||||
__u64 relocs; /* struct drm_qxl_reloc* */
|
||||
__u32 type;
|
||||
__u32 command_size;
|
||||
__u32 relocs_num;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
struct drm_qxl_execbuffer {
|
||||
__u32 flags; /* for future use */
|
||||
__u32 commands_num;
|
||||
__u64 commands; /* struct drm_qxl_command* */
|
||||
};
|
||||
|
||||
struct drm_qxl_update_area {
|
||||
__u32 handle;
|
||||
__u32 top;
|
||||
__u32 left;
|
||||
__u32 bottom;
|
||||
__u32 right;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
|
||||
#define QXL_PARAM_MAX_RELOCS 2
|
||||
struct drm_qxl_getparam {
|
||||
__u64 param;
|
||||
__u64 value;
|
||||
};
|
||||
|
||||
/* these are one-bit values */
|
||||
struct drm_qxl_clientcap {
|
||||
__u32 index;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
struct drm_qxl_alloc_surf {
|
||||
__u32 format;
|
||||
__u32 width;
|
||||
__u32 height;
|
||||
__s32 stride;
|
||||
__u32 handle;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
#define DRM_IOCTL_QXL_ALLOC \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC, struct drm_qxl_alloc)
|
||||
|
||||
#define DRM_IOCTL_QXL_MAP \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_MAP, struct drm_qxl_map)
|
||||
|
||||
#define DRM_IOCTL_QXL_EXECBUFFER \
|
||||
DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_EXECBUFFER,\
|
||||
struct drm_qxl_execbuffer)
|
||||
|
||||
#define DRM_IOCTL_QXL_UPDATE_AREA \
|
||||
DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_UPDATE_AREA,\
|
||||
struct drm_qxl_update_area)
|
||||
|
||||
#define DRM_IOCTL_QXL_GETPARAM \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_GETPARAM,\
|
||||
struct drm_qxl_getparam)
|
||||
|
||||
#define DRM_IOCTL_QXL_CLIENTCAP \
|
||||
DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_CLIENTCAP,\
|
||||
struct drm_qxl_clientcap)
|
||||
|
||||
#define DRM_IOCTL_QXL_ALLOC_SURF \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
|
||||
struct drm_qxl_alloc_surf)
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
336
drm/r128_drm.h
Normal file
@ -0,0 +1,336 @@
|
||||
/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
|
||||
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
|
||||
*/
|
||||
/*
|
||||
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
* Kevin E. Martin <martin@valinux.com>
|
||||
*/
|
||||
|
||||
#ifndef __R128_DRM_H__
|
||||
#define __R128_DRM_H__
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* WARNING: If you change any of these defines, make sure to change the
|
||||
* defines in the X server file (r128_sarea.h)
|
||||
*/
|
||||
#ifndef __R128_SAREA_DEFINES__
|
||||
#define __R128_SAREA_DEFINES__
|
||||
|
||||
/* What needs to be changed for the current vertex buffer?
|
||||
*/
|
||||
#define R128_UPLOAD_CONTEXT 0x001
|
||||
#define R128_UPLOAD_SETUP 0x002
|
||||
#define R128_UPLOAD_TEX0 0x004
|
||||
#define R128_UPLOAD_TEX1 0x008
|
||||
#define R128_UPLOAD_TEX0IMAGES 0x010
|
||||
#define R128_UPLOAD_TEX1IMAGES 0x020
|
||||
#define R128_UPLOAD_CORE 0x040
|
||||
#define R128_UPLOAD_MASKS 0x080
|
||||
#define R128_UPLOAD_WINDOW 0x100
|
||||
#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
|
||||
#define R128_REQUIRE_QUIESCENCE 0x400
|
||||
#define R128_UPLOAD_ALL 0x7ff
|
||||
|
||||
#define R128_FRONT 0x1
|
||||
#define R128_BACK 0x2
|
||||
#define R128_DEPTH 0x4
|
||||
|
||||
/* Primitive types
|
||||
*/
|
||||
#define R128_POINTS 0x1
|
||||
#define R128_LINES 0x2
|
||||
#define R128_LINE_STRIP 0x3
|
||||
#define R128_TRIANGLES 0x4
|
||||
#define R128_TRIANGLE_FAN 0x5
|
||||
#define R128_TRIANGLE_STRIP 0x6
|
||||
|
||||
/* Vertex/indirect buffer size
|
||||
*/
|
||||
#define R128_BUFFER_SIZE 16384
|
||||
|
||||
/* Byte offsets for indirect buffer data
|
||||
*/
|
||||
#define R128_INDEX_PRIM_OFFSET 20
|
||||
#define R128_HOSTDATA_BLIT_OFFSET 32
|
||||
|
||||
/* Keep these small for testing.
|
||||
*/
|
||||
#define R128_NR_SAREA_CLIPRECTS 12
|
||||
|
||||
/* There are 2 heaps (local/AGP). Each region within a heap is a
|
||||
* minimum of 64k, and there are at most 64 of them per heap.
|
||||
*/
|
||||
#define R128_LOCAL_TEX_HEAP 0
|
||||
#define R128_AGP_TEX_HEAP 1
|
||||
#define R128_NR_TEX_HEAPS 2
|
||||
#define R128_NR_TEX_REGIONS 64
|
||||
#define R128_LOG_TEX_GRANULARITY 16
|
||||
|
||||
#define R128_NR_CONTEXT_REGS 12
|
||||
|
||||
#define R128_MAX_TEXTURE_LEVELS 11
|
||||
#define R128_MAX_TEXTURE_UNITS 2
|
||||
|
||||
#endif /* __R128_SAREA_DEFINES__ */
|
||||
|
||||
typedef struct {
|
||||
/* Context state - can be written in one large chunk */
|
||||
unsigned int dst_pitch_offset_c;
|
||||
unsigned int dp_gui_master_cntl_c;
|
||||
unsigned int sc_top_left_c;
|
||||
unsigned int sc_bottom_right_c;
|
||||
unsigned int z_offset_c;
|
||||
unsigned int z_pitch_c;
|
||||
unsigned int z_sten_cntl_c;
|
||||
unsigned int tex_cntl_c;
|
||||
unsigned int misc_3d_state_cntl_reg;
|
||||
unsigned int texture_clr_cmp_clr_c;
|
||||
unsigned int texture_clr_cmp_msk_c;
|
||||
unsigned int fog_color_c;
|
||||
|
||||
/* Texture state */
|
||||
unsigned int tex_size_pitch_c;
|
||||
unsigned int constant_color_c;
|
||||
|
||||
/* Setup state */
|
||||
unsigned int pm4_vc_fpu_setup;
|
||||
unsigned int setup_cntl;
|
||||
|
||||
/* Mask state */
|
||||
unsigned int dp_write_mask;
|
||||
unsigned int sten_ref_mask_c;
|
||||
unsigned int plane_3d_mask_c;
|
||||
|
||||
/* Window state */
|
||||
unsigned int window_xy_offset;
|
||||
|
||||
/* Core state */
|
||||
unsigned int scale_3d_cntl;
|
||||
} drm_r128_context_regs_t;
|
||||
|
||||
/* Setup registers for each texture unit
|
||||
*/
|
||||
typedef struct {
|
||||
unsigned int tex_cntl;
|
||||
unsigned int tex_combine_cntl;
|
||||
unsigned int tex_size_pitch;
|
||||
unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
|
||||
unsigned int tex_border_color;
|
||||
} drm_r128_texture_regs_t;
|
||||
|
||||
typedef struct drm_r128_sarea {
|
||||
/* The channel for communication of state information to the kernel
|
||||
* on firing a vertex buffer.
|
||||
*/
|
||||
drm_r128_context_regs_t context_state;
|
||||
drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
|
||||
unsigned int dirty;
|
||||
unsigned int vertsize;
|
||||
unsigned int vc_format;
|
||||
|
||||
/* The current cliprects, or a subset thereof.
|
||||
*/
|
||||
struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
|
||||
unsigned int nbox;
|
||||
|
||||
/* Counters for client-side throttling of rendering clients.
|
||||
*/
|
||||
unsigned int last_frame;
|
||||
unsigned int last_dispatch;
|
||||
|
||||
struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
|
||||
unsigned int tex_age[R128_NR_TEX_HEAPS];
|
||||
int ctx_owner;
|
||||
int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
|
||||
int pfCurrentPage; /* which buffer is being displayed? */
|
||||
} drm_r128_sarea_t;
|
||||
|
||||
/* WARNING: If you change any of these defines, make sure to change the
|
||||
* defines in the Xserver file (xf86drmR128.h)
|
||||
*/
|
||||
|
||||
/* Rage 128 specific ioctls
|
||||
* The device specific ioctl range is 0x40 to 0x79.
|
||||
*/
|
||||
#define DRM_R128_INIT 0x00
|
||||
#define DRM_R128_CCE_START 0x01
|
||||
#define DRM_R128_CCE_STOP 0x02
|
||||
#define DRM_R128_CCE_RESET 0x03
|
||||
#define DRM_R128_CCE_IDLE 0x04
|
||||
/* 0x05 not used */
|
||||
#define DRM_R128_RESET 0x06
|
||||
#define DRM_R128_SWAP 0x07
|
||||
#define DRM_R128_CLEAR 0x08
|
||||
#define DRM_R128_VERTEX 0x09
|
||||
#define DRM_R128_INDICES 0x0a
|
||||
#define DRM_R128_BLIT 0x0b
|
||||
#define DRM_R128_DEPTH 0x0c
|
||||
#define DRM_R128_STIPPLE 0x0d
|
||||
/* 0x0e not used */
|
||||
#define DRM_R128_INDIRECT 0x0f
|
||||
#define DRM_R128_FULLSCREEN 0x10
|
||||
#define DRM_R128_CLEAR2 0x11
|
||||
#define DRM_R128_GETPARAM 0x12
|
||||
#define DRM_R128_FLIP 0x13
|
||||
|
||||
#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
|
||||
#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
|
||||
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
|
||||
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
|
||||
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
|
||||
/* 0x05 not used */
|
||||
#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
|
||||
#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
|
||||
#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
|
||||
#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
|
||||
#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
|
||||
#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
|
||||
#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
|
||||
#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
|
||||
/* 0x0e not used */
|
||||
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
|
||||
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
|
||||
#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
|
||||
#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
|
||||
#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
|
||||
|
||||
typedef struct drm_r128_init {
|
||||
enum {
|
||||
R128_INIT_CCE = 0x01,
|
||||
R128_CLEANUP_CCE = 0x02
|
||||
} func;
|
||||
unsigned long sarea_priv_offset;
|
||||
int is_pci;
|
||||
int cce_mode;
|
||||
int cce_secure;
|
||||
int ring_size;
|
||||
int usec_timeout;
|
||||
|
||||
unsigned int fb_bpp;
|
||||
unsigned int front_offset, front_pitch;
|
||||
unsigned int back_offset, back_pitch;
|
||||
unsigned int depth_bpp;
|
||||
unsigned int depth_offset, depth_pitch;
|
||||
unsigned int span_offset;
|
||||
|
||||
unsigned long fb_offset;
|
||||
unsigned long mmio_offset;
|
||||
unsigned long ring_offset;
|
||||
unsigned long ring_rptr_offset;
|
||||
unsigned long buffers_offset;
|
||||
unsigned long agp_textures_offset;
|
||||
} drm_r128_init_t;
|
||||
|
||||
typedef struct drm_r128_cce_stop {
|
||||
int flush;
|
||||
int idle;
|
||||
} drm_r128_cce_stop_t;
|
||||
|
||||
typedef struct drm_r128_clear {
|
||||
unsigned int flags;
|
||||
unsigned int clear_color;
|
||||
unsigned int clear_depth;
|
||||
unsigned int color_mask;
|
||||
unsigned int depth_mask;
|
||||
} drm_r128_clear_t;
|
||||
|
||||
typedef struct drm_r128_vertex {
|
||||
int prim;
|
||||
int idx; /* Index of vertex buffer */
|
||||
int count; /* Number of vertices in buffer */
|
||||
int discard; /* Client finished with buffer? */
|
||||
} drm_r128_vertex_t;
|
||||
|
||||
typedef struct drm_r128_indices {
|
||||
int prim;
|
||||
int idx;
|
||||
int start;
|
||||
int end;
|
||||
int discard; /* Client finished with buffer? */
|
||||
} drm_r128_indices_t;
|
||||
|
||||
typedef struct drm_r128_blit {
|
||||
int idx;
|
||||
int pitch;
|
||||
int offset;
|
||||
int format;
|
||||
unsigned short x, y;
|
||||
unsigned short width, height;
|
||||
} drm_r128_blit_t;
|
||||
|
||||
typedef struct drm_r128_depth {
|
||||
enum {
|
||||
R128_WRITE_SPAN = 0x01,
|
||||
R128_WRITE_PIXELS = 0x02,
|
||||
R128_READ_SPAN = 0x03,
|
||||
R128_READ_PIXELS = 0x04
|
||||
} func;
|
||||
int n;
|
||||
int *x;
|
||||
int *y;
|
||||
unsigned int *buffer;
|
||||
unsigned char *mask;
|
||||
} drm_r128_depth_t;
|
||||
|
||||
typedef struct drm_r128_stipple {
|
||||
unsigned int *mask;
|
||||
} drm_r128_stipple_t;
|
||||
|
||||
typedef struct drm_r128_indirect {
|
||||
int idx;
|
||||
int start;
|
||||
int end;
|
||||
int discard;
|
||||
} drm_r128_indirect_t;
|
||||
|
||||
typedef struct drm_r128_fullscreen {
|
||||
enum {
|
||||
R128_INIT_FULLSCREEN = 0x01,
|
||||
R128_CLEANUP_FULLSCREEN = 0x02
|
||||
} func;
|
||||
} drm_r128_fullscreen_t;
|
||||
|
||||
/* 2.3: An ioctl to get parameters that aren't available to the 3d
|
||||
* client any other way.
|
||||
*/
|
||||
#define R128_PARAM_IRQ_NR 1
|
||||
|
||||
typedef struct drm_r128_getparam {
|
||||
int param;
|
||||
void *value;
|
||||
} drm_r128_getparam_t;
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
1078
drm/radeon_drm.h
Normal file
File diff suppressed because it is too large
220
drm/savage_drm.h
Normal file
@ -0,0 +1,220 @@
|
||||
/* savage_drm.h -- Public header for the savage driver
|
||||
*
|
||||
* Copyright 2004 Felix Kuehling
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sub license,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
|
||||
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
|
||||
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __SAVAGE_DRM_H__
|
||||
#define __SAVAGE_DRM_H__
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef __SAVAGE_SAREA_DEFINES__
|
||||
#define __SAVAGE_SAREA_DEFINES__
|
||||
|
||||
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
|
||||
* regions, subject to a minimum region size of (1<<16) == 64k.
|
||||
*
|
||||
* Clients may subdivide regions internally, but when sharing between
|
||||
* clients, the region size is the minimum granularity.
|
||||
*/
|
||||
|
||||
#define SAVAGE_CARD_HEAP 0
|
||||
#define SAVAGE_AGP_HEAP 1
|
||||
#define SAVAGE_NR_TEX_HEAPS 2
|
||||
#define SAVAGE_NR_TEX_REGIONS 16
|
||||
#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
|
||||
|
||||
#endif /* __SAVAGE_SAREA_DEFINES__ */
|
||||
|
||||
typedef struct _drm_savage_sarea {
|
||||
/* LRU lists for texture memory in agp space and on the card.
|
||||
*/
|
||||
struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
|
||||
1];
|
||||
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
|
||||
|
||||
/* Mechanism to validate card state.
|
||||
*/
|
||||
int ctxOwner;
|
||||
} drm_savage_sarea_t, *drm_savage_sarea_ptr;
|
||||
|
||||
/* Savage-specific ioctls
|
||||
*/
|
||||
#define DRM_SAVAGE_BCI_INIT 0x00
|
||||
#define DRM_SAVAGE_BCI_CMDBUF 0x01
|
||||
#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
|
||||
#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
|
||||
|
||||
#define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
|
||||
#define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
|
||||
#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
|
||||
#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
|
||||
|
||||
#define SAVAGE_DMA_PCI 1
|
||||
#define SAVAGE_DMA_AGP 3
|
||||
typedef struct drm_savage_init {
|
||||
enum {
|
||||
SAVAGE_INIT_BCI = 1,
|
||||
SAVAGE_CLEANUP_BCI = 2
|
||||
} func;
|
||||
unsigned int sarea_priv_offset;
|
||||
|
||||
/* some parameters */
|
||||
unsigned int cob_size;
|
||||
unsigned int bci_threshold_lo, bci_threshold_hi;
|
||||
unsigned int dma_type;
|
||||
|
||||
/* frame buffer layout */
|
||||
unsigned int fb_bpp;
|
||||
unsigned int front_offset, front_pitch;
|
||||
unsigned int back_offset, back_pitch;
|
||||
unsigned int depth_bpp;
|
||||
unsigned int depth_offset, depth_pitch;
|
||||
|
||||
/* local textures */
|
||||
unsigned int texture_offset;
|
||||
unsigned int texture_size;
|
||||
|
||||
/* physical locations of non-permanent maps */
|
||||
unsigned long status_offset;
|
||||
unsigned long buffers_offset;
|
||||
unsigned long agp_textures_offset;
|
||||
unsigned long cmd_dma_offset;
|
||||
} drm_savage_init_t;
|
||||
|
||||
typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
|
||||
typedef struct drm_savage_cmdbuf {
|
||||
/* command buffer in client's address space */
|
||||
drm_savage_cmd_header_t *cmd_addr;
|
||||
unsigned int size; /* size of the command buffer in 64bit units */
|
||||
|
||||
unsigned int dma_idx; /* DMA buffer index to use */
|
||||
int discard; /* discard DMA buffer when done */
|
||||
/* vertex buffer in client's address space */
|
||||
unsigned int *vb_addr;
|
||||
unsigned int vb_size; /* size of client vertex buffer in bytes */
|
||||
unsigned int vb_stride; /* stride of vertices in 32bit words */
|
||||
/* boxes in client's address space */
|
||||
struct drm_clip_rect *box_addr;
|
||||
unsigned int nbox; /* number of clipping boxes */
|
||||
} drm_savage_cmdbuf_t;
|
||||
|
||||
#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
|
||||
#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
|
||||
#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
|
||||
typedef struct drm_savage_event {
|
||||
unsigned int count;
|
||||
unsigned int flags;
|
||||
} drm_savage_event_emit_t, drm_savage_event_wait_t;
|
||||
|
||||
/* Commands for the cmdbuf ioctl
|
||||
*/
|
||||
#define SAVAGE_CMD_STATE 0 /* a range of state registers */
|
||||
#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
|
||||
#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
|
||||
#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
|
||||
#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */
|
||||
#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
|
||||
#define SAVAGE_CMD_SWAP 6 /* swap buffers */
|
||||
|
||||
/* Primitive types
|
||||
*/
|
||||
#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
|
||||
#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
|
||||
#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
|
||||
#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
|
||||
* shading on s3d */
|
||||
|
||||
/* Skip flags (vertex format)
|
||||
*/
|
||||
#define SAVAGE_SKIP_Z 0x01
|
||||
#define SAVAGE_SKIP_W 0x02
|
||||
#define SAVAGE_SKIP_C0 0x04
|
||||
#define SAVAGE_SKIP_C1 0x08
|
||||
#define SAVAGE_SKIP_S0 0x10
|
||||
#define SAVAGE_SKIP_T0 0x20
|
||||
#define SAVAGE_SKIP_ST0 0x30
|
||||
#define SAVAGE_SKIP_S1 0x40
|
||||
#define SAVAGE_SKIP_T1 0x80
|
||||
#define SAVAGE_SKIP_ST1 0xc0
|
||||
#define SAVAGE_SKIP_ALL_S3D 0x3f
|
||||
#define SAVAGE_SKIP_ALL_S4 0xff
|
||||
|
||||
/* Buffer names for clear command
|
||||
*/
|
||||
#define SAVAGE_FRONT 0x1
|
||||
#define SAVAGE_BACK 0x2
|
||||
#define SAVAGE_DEPTH 0x4
|
||||
|
||||
/* 64-bit command header
|
||||
*/
|
||||
union drm_savage_cmd_header {
|
||||
struct {
|
||||
unsigned char cmd; /* command */
|
||||
unsigned char pad0;
|
||||
unsigned short pad1;
|
||||
unsigned short pad2;
|
||||
unsigned short pad3;
|
||||
} cmd; /* generic */
|
||||
struct {
|
||||
unsigned char cmd;
|
||||
unsigned char global; /* need idle engine? */
|
||||
unsigned short count; /* number of consecutive registers */
|
||||
unsigned short start; /* first register */
|
||||
unsigned short pad3;
|
||||
} state; /* SAVAGE_CMD_STATE */
|
||||
struct {
|
||||
unsigned char cmd;
|
||||
unsigned char prim; /* primitive type */
|
||||
unsigned short skip; /* vertex format (skip flags) */
|
||||
unsigned short count; /* number of vertices */
|
||||
unsigned short start; /* first vertex in DMA/vertex buffer */
|
||||
} prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
|
||||
struct {
|
||||
unsigned char cmd;
|
||||
unsigned char prim;
|
||||
unsigned short skip;
|
||||
unsigned short count; /* number of indices that follow */
|
||||
unsigned short pad3;
|
||||
} idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
|
||||
struct {
|
||||
unsigned char cmd;
|
||||
unsigned char pad0;
|
||||
unsigned short pad1;
|
||||
unsigned int flags;
|
||||
} clear0; /* SAVAGE_CMD_CLEAR */
|
||||
struct {
|
||||
unsigned int mask;
|
||||
unsigned int value;
|
||||
} clear1; /* SAVAGE_CMD_CLEAR data */
|
||||
};
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
77
drm/sis_drm.h
Normal file
@ -0,0 +1,77 @@
|
||||
/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
|
||||
/*
|
||||
* Copyright 2005 Eric Anholt
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __SIS_DRM_H__
|
||||
#define __SIS_DRM_H__
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* SiS specific ioctls */
|
||||
#define NOT_USED_0_3
|
||||
#define DRM_SIS_FB_ALLOC 0x04
|
||||
#define DRM_SIS_FB_FREE 0x05
|
||||
#define NOT_USED_6_12
|
||||
#define DRM_SIS_AGP_INIT 0x13
|
||||
#define DRM_SIS_AGP_ALLOC 0x14
|
||||
#define DRM_SIS_AGP_FREE 0x15
|
||||
#define DRM_SIS_FB_INIT 0x16
|
||||
|
||||
#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
|
||||
#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
|
||||
#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
|
||||
#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
|
||||
#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
|
||||
#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
|
||||
/*
|
||||
#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
|
||||
#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
|
||||
#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
|
||||
*/
|
||||
|
||||
typedef struct {
|
||||
int context;
|
||||
unsigned long offset;
|
||||
unsigned long size;
|
||||
unsigned long free;
|
||||
} drm_sis_mem_t;
|
||||
|
||||
typedef struct {
|
||||
unsigned long offset, size;
|
||||
} drm_sis_agp_t;
|
||||
|
||||
typedef struct {
|
||||
unsigned long offset, size;
|
||||
} drm_sis_fb_t;
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __SIS_DRM_H__ */
|
1060
drm/tegra_drm.h
Normal file
File diff suppressed because it is too large
481
drm/v3d_drm.h
Normal file
@ -0,0 +1,481 @@
|
||||
/*
|
||||
* Copyright © 2014-2018 Broadcom
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _V3D_DRM_H_
|
||||
#define _V3D_DRM_H_
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define DRM_V3D_SUBMIT_CL 0x00
|
||||
#define DRM_V3D_WAIT_BO 0x01
|
||||
#define DRM_V3D_CREATE_BO 0x02
|
||||
#define DRM_V3D_MMAP_BO 0x03
|
||||
#define DRM_V3D_GET_PARAM 0x04
|
||||
#define DRM_V3D_GET_BO_OFFSET 0x05
|
||||
#define DRM_V3D_SUBMIT_TFU 0x06
|
||||
#define DRM_V3D_SUBMIT_CSD 0x07
|
||||
#define DRM_V3D_PERFMON_CREATE 0x08
|
||||
#define DRM_V3D_PERFMON_DESTROY 0x09
|
||||
#define DRM_V3D_PERFMON_GET_VALUES 0x0a
|
||||
|
||||
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
|
||||
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
|
||||
#define DRM_IOCTL_V3D_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo)
|
||||
#define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
|
||||
#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
|
||||
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
|
||||
#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
|
||||
#define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd)
|
||||
#define DRM_IOCTL_V3D_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_CREATE, \
|
||||
struct drm_v3d_perfmon_create)
|
||||
#define DRM_IOCTL_V3D_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_DESTROY, \
|
||||
struct drm_v3d_perfmon_destroy)
|
||||
#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
|
||||
struct drm_v3d_perfmon_get_values)
|
||||
|
||||
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
|
||||
#define DRM_V3D_SUBMIT_EXTENSION 0x02
|
||||
|
||||
/* struct drm_v3d_extension - ioctl extensions
|
||||
*
|
||||
* Linked-list of generic extensions where the id identifies which struct is
|
||||
* pointed to by ext_data. Therefore, DRM_V3D_EXT_ID_* is used in id to identify
|
||||
* the extension type.
|
||||
*/
|
||||
struct drm_v3d_extension {
|
||||
__u64 next;
|
||||
__u32 id;
|
||||
#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
|
||||
__u32 flags; /* mbz */
|
||||
};
|
||||
|
||||
/* struct drm_v3d_sem - wait/signal semaphore
|
||||
*
|
||||
* A binary semaphore only takes the syncobj handle and ignores the flags and
|
||||
* point fields. Point is defined for the timeline syncobj feature.
|
||||
*/
|
||||
struct drm_v3d_sem {
|
||||
__u32 handle; /* syncobj */
|
||||
/* rsv below, for future uses */
|
||||
__u32 flags;
|
||||
__u64 point; /* for timeline sem support */
|
||||
__u64 mbz[2]; /* must be zero, rsv */
|
||||
};
|
||||
|
||||
/* Enum for each of the V3D queues. */
|
||||
enum v3d_queue {
|
||||
V3D_BIN,
|
||||
V3D_RENDER,
|
||||
V3D_TFU,
|
||||
V3D_CSD,
|
||||
V3D_CACHE_CLEAN,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct drm_v3d_multi_sync - ioctl extension to add support for multiple
|
||||
* syncobjs for command submission.
|
||||
*
|
||||
* When an extension with id DRM_V3D_EXT_ID_MULTI_SYNC is defined, it points to
|
||||
* this extension to define wait and signal dependencies, instead of single
|
||||
* in/out sync entries when submitting commands. The flags field is used to
|
||||
* determine the stage to set wait dependencies.
|
||||
*/
|
||||
struct drm_v3d_multi_sync {
|
||||
struct drm_v3d_extension base;
|
||||
/* Array of wait and signal semaphores */
|
||||
__u64 in_syncs;
|
||||
__u64 out_syncs;
|
||||
|
||||
/* Number of entries */
|
||||
__u32 in_sync_count;
|
||||
__u32 out_sync_count;
|
||||
|
||||
/* set the stage (v3d_queue) to sync */
|
||||
__u32 wait_stage;
|
||||
|
||||
__u32 pad; /* mbz */
|
||||
};
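/*
 * Illustrative sketch, not part of the kernel header: chain a multisync
 * extension into a CL submission so the render stage waits on 'in' and the
 * job signals 'out'. Assumes this header is included and that the
 * drm_v3d_sem arrays and the submit struct are prepared by the caller.
 */
#include <stdint.h>

static void v3d_attach_multisync(struct drm_v3d_submit_cl *submit,
                                 struct drm_v3d_multi_sync *ms,
                                 struct drm_v3d_sem *in, __u32 in_count,
                                 struct drm_v3d_sem *out, __u32 out_count)
{
        ms->base.id = DRM_V3D_EXT_ID_MULTI_SYNC;
        ms->base.next = 0;              /* last extension in the chain */
        ms->base.flags = 0;             /* mbz */
        ms->in_syncs = (uintptr_t)in;
        ms->out_syncs = (uintptr_t)out;
        ms->in_sync_count = in_count;
        ms->out_sync_count = out_count;
        ms->wait_stage = V3D_RENDER;    /* stage that waits on in_syncs */
        ms->pad = 0;

        submit->extensions = (uintptr_t)ms;
        submit->flags |= DRM_V3D_SUBMIT_EXTENSION;
}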
|
||||
|
||||
/**
|
||||
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
|
||||
* engine.
|
||||
*
|
||||
* This asks the kernel to have the GPU execute an optional binner
|
||||
* command list, and a render command list.
|
||||
*
|
||||
* The L1T, slice, L2C, L2T, and GCA caches will be flushed before
|
||||
* each CL executes. The VCD cache should be flushed (if necessary)
|
||||
* by the submitted CLs. The TLB writes are guaranteed to have been
|
||||
* flushed by the time the render done IRQ happens, which is the
|
||||
* trigger for out_sync. Any dirtying of cachelines by the job (only
|
||||
* possible using TMU writes) must be flushed by the caller using the
|
||||
* DRM_V3D_SUBMIT_CL_FLUSH_CACHE flag.
|
||||
*/
|
||||
struct drm_v3d_submit_cl {
|
||||
/* Pointer to the binner command list.
|
||||
*
|
||||
* This is the first set of commands executed, which runs the
|
||||
* coordinate shader to determine where primitives land on the screen,
|
||||
* then writes out the state updates and draw calls necessary per tile
|
||||
* to the tile allocation BO.
|
||||
*
|
||||
* This BCL will block on any previous BCL submitted on the
|
||||
* same FD, but not on any RCL or BCLs submitted by other
|
||||
* clients -- that is left up to the submitter to control
|
||||
* using in_sync_bcl if necessary.
|
||||
*/
|
||||
__u32 bcl_start;
|
||||
|
||||
/** End address of the BCL (first byte after the BCL) */
|
||||
__u32 bcl_end;
|
||||
|
||||
/* Offset of the render command list.
|
||||
*
|
||||
* This is the second set of commands executed, which will either
|
||||
* execute the tiles that have been set up by the BCL, or a fixed set
|
||||
* of tiles (in the case of RCL-only blits).
|
||||
*
|
||||
* This RCL will block on this submit's BCL, and any previous
|
||||
* RCL submitted on the same FD, but not on any RCL or BCLs
|
||||
* submitted by other clients -- that is left up to the
|
||||
* submitter to control using in_sync_rcl if necessary.
|
||||
*/
|
||||
__u32 rcl_start;
|
||||
|
||||
/** End address of the RCL (first byte after the RCL) */
|
||||
__u32 rcl_end;
|
||||
|
||||
/** An optional sync object to wait on before starting the BCL. */
|
||||
__u32 in_sync_bcl;
|
||||
/** An optional sync object to wait on before starting the RCL. */
|
||||
__u32 in_sync_rcl;
|
||||
/** An optional sync object to place the completion fence in. */
|
||||
__u32 out_sync;
|
||||
|
||||
/* Offset of the tile alloc memory
|
||||
*
|
||||
* This is optional on V3D 3.3 (where the CL can set the value) but
|
||||
* required on V3D 4.1.
|
||||
*/
|
||||
__u32 qma;
|
||||
|
||||
/** Size of the tile alloc memory. */
|
||||
__u32 qms;
|
||||
|
||||
/** Offset of the tile state data array. */
|
||||
__u32 qts;
|
||||
|
||||
/* Pointer to a u32 array of the BOs that are referenced by the job.
|
||||
*/
|
||||
__u64 bo_handles;
|
||||
|
||||
/* Number of BO handles passed in (size is that times 4). */
|
||||
__u32 bo_handle_count;
|
||||
|
||||
/* DRM_V3D_SUBMIT_* properties */
|
||||
__u32 flags;
|
||||
|
||||
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
|
||||
__u32 perfmon_id;
|
||||
|
||||
__u32 pad;
|
||||
|
||||
/* Pointer to an array of ioctl extensions. */
|
||||
__u64 extensions;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct drm_v3d_wait_bo - ioctl argument for waiting for
|
||||
* completion of the last DRM_V3D_SUBMIT_CL on a BO.
|
||||
*
|
||||
* This is useful for cases where multiple processes might be
|
||||
* rendering to a BO and you want to wait for all rendering to be
|
||||
* completed.
|
||||
*/
|
||||
struct drm_v3d_wait_bo {
|
||||
__u32 handle;
|
||||
__u32 pad;
|
||||
__u64 timeout_ns;
|
||||
};
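/*
 * Illustrative sketch, not part of the kernel header: wait up to timeout_ns
 * for the last CL submission touching a BO to finish, using
 * DRM_IOCTL_V3D_WAIT_BO as defined above.
 */
#include <sys/ioctl.h>

static int v3d_wait_bo(int drm_fd, __u32 handle, __u64 timeout_ns)
{
        struct drm_v3d_wait_bo wait = {
                .handle = handle,
                .timeout_ns = timeout_ns,
        };

        return ioctl(drm_fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
}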
|
||||
|
||||
/**
|
||||
* struct drm_v3d_create_bo - ioctl argument for creating V3D BOs.
|
||||
*
|
||||
* There are currently no values for the flags argument, but it may be
|
||||
* used in a future extension.
|
||||
*/
|
||||
struct drm_v3d_create_bo {
|
||||
__u32 size;
|
||||
__u32 flags;
|
||||
/** Returned GEM handle for the BO. */
|
||||
__u32 handle;
|
||||
/**
|
||||
* Returned offset for the BO in the V3D address space. This offset
|
||||
* is private to the DRM fd and is valid for the lifetime of the GEM
|
||||
* handle.
|
||||
*
|
||||
* This offset value will always be nonzero, since various HW
|
||||
* units treat 0 specially.
|
||||
*/
|
||||
__u32 offset;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs.
|
||||
*
|
||||
* This doesn't actually perform an mmap. Instead, it returns the
|
||||
* offset you need to use in an mmap on the DRM device node. This
|
||||
* means that tools like valgrind end up knowing about the mapped
|
||||
* memory.
|
||||
*
|
||||
* There are currently no values for the flags argument, but it may be
|
||||
* used in a future extension.
|
||||
*/
|
||||
struct drm_v3d_mmap_bo {
|
||||
/** Handle for the object being mapped. */
|
||||
__u32 handle;
|
||||
__u32 flags;
|
||||
/** offset into the drm node to use for subsequent mmap call. */
|
||||
__u64 offset;
|
||||
};
|
||||
|
||||
enum drm_v3d_param {
|
||||
DRM_V3D_PARAM_V3D_UIFCFG,
|
||||
DRM_V3D_PARAM_V3D_HUB_IDENT1,
|
||||
DRM_V3D_PARAM_V3D_HUB_IDENT2,
|
||||
DRM_V3D_PARAM_V3D_HUB_IDENT3,
|
||||
DRM_V3D_PARAM_V3D_CORE0_IDENT0,
|
||||
DRM_V3D_PARAM_V3D_CORE0_IDENT1,
|
||||
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
|
||||
DRM_V3D_PARAM_SUPPORTS_TFU,
|
||||
DRM_V3D_PARAM_SUPPORTS_CSD,
|
||||
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
|
||||
DRM_V3D_PARAM_SUPPORTS_PERFMON,
|
||||
DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
|
||||
};
|
||||
|
||||
struct drm_v3d_get_param {
|
||||
__u32 param;
|
||||
__u32 pad;
|
||||
__u64 value;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the offset for the BO in the V3D address space for this DRM fd.
|
||||
* This is the same value returned by drm_v3d_create_bo, if that was called
|
||||
* from this DRM fd.
|
||||
*/
|
||||
struct drm_v3d_get_bo_offset {
|
||||
__u32 handle;
|
||||
__u32 offset;
|
||||
};
|
||||
|
||||
struct drm_v3d_submit_tfu {
|
||||
__u32 icfg;
|
||||
__u32 iia;
|
||||
__u32 iis;
|
||||
__u32 ica;
|
||||
__u32 iua;
|
||||
__u32 ioa;
|
||||
__u32 ios;
|
||||
__u32 coef[4];
|
||||
/* First handle is the output BO, following are other inputs.
|
||||
* 0 for unused.
|
||||
*/
|
||||
__u32 bo_handles[4];
|
||||
/* sync object to block on before running the TFU job. Each TFU
|
||||
* job will execute in the order submitted to its FD. Synchronization
|
||||
* against rendering jobs requires using sync objects.
|
||||
*/
|
||||
__u32 in_sync;
|
||||
/* Sync object to signal when the TFU job is done. */
|
||||
__u32 out_sync;
|
||||
|
||||
__u32 flags;
|
||||
|
||||
/* Pointer to an array of ioctl extensions. */
|
||||
__u64 extensions;
|
||||
};
|
||||
|
||||
/* Submits a compute shader for dispatch. This job will block on any
|
||||
* previous compute shaders submitted on this fd, and any other
|
||||
* synchronization must be performed with in_sync/out_sync.
|
||||
*/
|
||||
struct drm_v3d_submit_csd {
|
||||
__u32 cfg[7];
|
||||
__u32 coef[4];
|
||||
|
||||
/* Pointer to a u32 array of the BOs that are referenced by the job.
|
||||
*/
|
||||
__u64 bo_handles;
|
||||
|
||||
/* Number of BO handles passed in (size is that times 4). */
|
||||
__u32 bo_handle_count;
|
||||
|
||||
/* sync object to block on before running the CSD job. Each
|
||||
* CSD job will execute in the order submitted to its FD.
|
||||
* Synchronization against rendering/TFU jobs or CSD from
|
||||
* other fds requires using sync objects.
|
||||
*/
|
||||
__u32 in_sync;
|
||||
/* Sync object to signal when the CSD job is done. */
|
||||
__u32 out_sync;
|
||||
|
||||
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
|
||||
__u32 perfmon_id;
|
||||
|
||||
/* Pointer to an array of ioctl extensions. */
|
||||
__u64 extensions;
|
||||
|
||||
__u32 flags;
|
||||
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
enum {
|
||||
V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
|
||||
V3D_PERFCNT_FEP_VALID_PRIMS,
|
||||
V3D_PERFCNT_FEP_EZ_NFCLIP_QUADS,
|
||||
V3D_PERFCNT_FEP_VALID_QUADS,
|
||||
V3D_PERFCNT_TLB_QUADS_STENCIL_FAIL,
|
||||
V3D_PERFCNT_TLB_QUADS_STENCILZ_FAIL,
|
||||
V3D_PERFCNT_TLB_QUADS_STENCILZ_PASS,
|
||||
V3D_PERFCNT_TLB_QUADS_ZERO_COV,
|
||||
V3D_PERFCNT_TLB_QUADS_NONZERO_COV,
|
||||
V3D_PERFCNT_TLB_QUADS_WRITTEN,
|
||||
V3D_PERFCNT_PTB_PRIM_VIEWPOINT_DISCARD,
|
||||
V3D_PERFCNT_PTB_PRIM_CLIP,
|
||||
V3D_PERFCNT_PTB_PRIM_REV,
|
||||
V3D_PERFCNT_QPU_IDLE_CYCLES,
|
||||
V3D_PERFCNT_QPU_ACTIVE_CYCLES_VERTEX_COORD_USER,
|
||||
V3D_PERFCNT_QPU_ACTIVE_CYCLES_FRAG,
|
||||
V3D_PERFCNT_QPU_CYCLES_VALID_INSTR,
|
||||
V3D_PERFCNT_QPU_CYCLES_TMU_STALL,
|
||||
V3D_PERFCNT_QPU_CYCLES_SCOREBOARD_STALL,
|
||||
V3D_PERFCNT_QPU_CYCLES_VARYINGS_STALL,
|
||||
V3D_PERFCNT_QPU_IC_HIT,
|
||||
V3D_PERFCNT_QPU_IC_MISS,
|
||||
V3D_PERFCNT_QPU_UC_HIT,
|
||||
V3D_PERFCNT_QPU_UC_MISS,
|
||||
V3D_PERFCNT_TMU_TCACHE_ACCESS,
|
||||
V3D_PERFCNT_TMU_TCACHE_MISS,
|
||||
V3D_PERFCNT_VPM_VDW_STALL,
|
||||
V3D_PERFCNT_VPM_VCD_STALL,
|
||||
V3D_PERFCNT_BIN_ACTIVE,
|
||||
V3D_PERFCNT_RDR_ACTIVE,
|
||||
V3D_PERFCNT_L2T_HITS,
|
||||
V3D_PERFCNT_L2T_MISSES,
|
||||
V3D_PERFCNT_CYCLE_COUNT,
|
||||
V3D_PERFCNT_QPU_CYCLES_STALLED_VERTEX_COORD_USER,
|
||||
V3D_PERFCNT_QPU_CYCLES_STALLED_FRAGMENT,
|
||||
V3D_PERFCNT_PTB_PRIMS_BINNED,
|
||||
V3D_PERFCNT_AXI_WRITES_WATCH_0,
|
||||
V3D_PERFCNT_AXI_READS_WATCH_0,
|
||||
V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_0,
|
||||
V3D_PERFCNT_AXI_READ_STALLS_WATCH_0,
|
||||
V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_0,
|
||||
V3D_PERFCNT_AXI_READ_BYTES_WATCH_0,
|
||||
V3D_PERFCNT_AXI_WRITES_WATCH_1,
|
||||
V3D_PERFCNT_AXI_READS_WATCH_1,
|
||||
V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_1,
|
||||
V3D_PERFCNT_AXI_READ_STALLS_WATCH_1,
|
||||
V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_1,
|
||||
V3D_PERFCNT_AXI_READ_BYTES_WATCH_1,
|
||||
V3D_PERFCNT_TLB_PARTIAL_QUADS,
|
||||
V3D_PERFCNT_TMU_CONFIG_ACCESSES,
|
||||
V3D_PERFCNT_L2T_NO_ID_STALL,
|
||||
V3D_PERFCNT_L2T_COM_QUE_STALL,
|
||||
V3D_PERFCNT_L2T_TMU_WRITES,
|
||||
V3D_PERFCNT_TMU_ACTIVE_CYCLES,
|
||||
V3D_PERFCNT_TMU_STALLED_CYCLES,
|
||||
V3D_PERFCNT_CLE_ACTIVE,
|
||||
V3D_PERFCNT_L2T_TMU_READS,
|
||||
V3D_PERFCNT_L2T_CLE_READS,
|
||||
V3D_PERFCNT_L2T_VCD_READS,
|
||||
V3D_PERFCNT_L2T_TMUCFG_READS,
|
||||
V3D_PERFCNT_L2T_SLC0_READS,
|
||||
V3D_PERFCNT_L2T_SLC1_READS,
|
||||
V3D_PERFCNT_L2T_SLC2_READS,
|
||||
V3D_PERFCNT_L2T_TMU_W_MISSES,
|
||||
V3D_PERFCNT_L2T_TMU_R_MISSES,
|
||||
V3D_PERFCNT_L2T_CLE_MISSES,
|
||||
V3D_PERFCNT_L2T_VCD_MISSES,
|
||||
V3D_PERFCNT_L2T_TMUCFG_MISSES,
|
||||
V3D_PERFCNT_L2T_SLC0_MISSES,
|
||||
V3D_PERFCNT_L2T_SLC1_MISSES,
|
||||
V3D_PERFCNT_L2T_SLC2_MISSES,
|
||||
V3D_PERFCNT_CORE_MEM_WRITES,
|
||||
V3D_PERFCNT_L2T_MEM_WRITES,
|
||||
V3D_PERFCNT_PTB_MEM_WRITES,
|
||||
V3D_PERFCNT_TLB_MEM_WRITES,
|
||||
V3D_PERFCNT_CORE_MEM_READS,
|
||||
V3D_PERFCNT_L2T_MEM_READS,
|
||||
V3D_PERFCNT_PTB_MEM_READS,
|
||||
V3D_PERFCNT_PSE_MEM_READS,
|
||||
V3D_PERFCNT_TLB_MEM_READS,
|
||||
V3D_PERFCNT_GMP_MEM_READS,
|
||||
V3D_PERFCNT_PTB_W_MEM_WORDS,
|
||||
V3D_PERFCNT_TLB_W_MEM_WORDS,
|
||||
V3D_PERFCNT_PSE_R_MEM_WORDS,
|
||||
V3D_PERFCNT_TLB_R_MEM_WORDS,
|
||||
V3D_PERFCNT_TMU_MRU_HITS,
|
||||
V3D_PERFCNT_COMPUTE_ACTIVE,
|
||||
V3D_PERFCNT_NUM,
|
||||
};
|
||||
|
||||
#define DRM_V3D_MAX_PERF_COUNTERS 32
|
||||
|
||||
struct drm_v3d_perfmon_create {
|
||||
__u32 id;
|
||||
__u32 ncounters;
|
||||
__u8 counters[DRM_V3D_MAX_PERF_COUNTERS];
|
||||
};
|
||||
|
||||
struct drm_v3d_perfmon_destroy {
|
||||
__u32 id;
|
||||
};
|
||||
|
||||
/*
|
||||
* Returns the values of the performance counters tracked by this
|
||||
* perfmon (as an array of ncounters u64 values).
|
||||
*
|
||||
* No implicit synchronization is performed, so the user has to
|
||||
* guarantee that any jobs using this perfmon have already been
|
||||
* completed (probably by blocking on the seqno returned by the
|
||||
* last exec that used the perfmon).
|
||||
*/
|
||||
struct drm_v3d_perfmon_get_values {
|
||||
__u32 id;
|
||||
__u32 pad;
|
||||
__u64 values_ptr;
|
||||
};
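/*
 * Illustrative sketch, not part of the kernel header: create a perfmon that
 * tracks two of the counters above and read the values back once every job
 * that referenced it (via perfmon_id) has completed. Uses the perfmon
 * ioctls defined above; error handling is kept minimal.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int v3d_read_two_counters(int drm_fd, __u64 values[2])
{
        struct drm_v3d_perfmon_create create = {
                .ncounters = 2,
                .counters = { V3D_PERFCNT_FEP_VALID_PRIMS,
                              V3D_PERFCNT_CYCLE_COUNT },
        };
        struct drm_v3d_perfmon_get_values get = { 0 };

        if (ioctl(drm_fd, DRM_IOCTL_V3D_PERFMON_CREATE, &create) != 0)
                return -1;

        /* ... submit jobs with perfmon_id = create.id and wait for them ... */

        get.id = create.id;
        get.values_ptr = (uintptr_t)values;     /* room for ncounters __u64s */
        return ioctl(drm_fd, DRM_IOCTL_V3D_PERFMON_GET_VALUES, &get);
}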
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _V3D_DRM_H_ */
|
442
drm/vc4_drm.h
Normal file
@ -0,0 +1,442 @@
|
||||
/*
|
||||
* Copyright © 2014-2015 Broadcom
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _VC4_DRM_H_
|
||||
#define _VC4_DRM_H_
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define DRM_VC4_SUBMIT_CL 0x00
|
||||
#define DRM_VC4_WAIT_SEQNO 0x01
|
||||
#define DRM_VC4_WAIT_BO 0x02
|
||||
#define DRM_VC4_CREATE_BO 0x03
|
||||
#define DRM_VC4_MMAP_BO 0x04
|
||||
#define DRM_VC4_CREATE_SHADER_BO 0x05
|
||||
#define DRM_VC4_GET_HANG_STATE 0x06
|
||||
#define DRM_VC4_GET_PARAM 0x07
|
||||
#define DRM_VC4_SET_TILING 0x08
|
||||
#define DRM_VC4_GET_TILING 0x09
|
||||
#define DRM_VC4_LABEL_BO 0x0a
|
||||
#define DRM_VC4_GEM_MADVISE 0x0b
|
||||
#define DRM_VC4_PERFMON_CREATE 0x0c
|
||||
#define DRM_VC4_PERFMON_DESTROY 0x0d
|
||||
#define DRM_VC4_PERFMON_GET_VALUES 0x0e
|
||||
|
||||
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
|
||||
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
|
||||
#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
|
||||
#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
|
||||
#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
|
||||
#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
|
||||
#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
|
||||
#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
|
||||
#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
|
||||
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
|
||||
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
|
||||
#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
|
||||
#define DRM_IOCTL_VC4_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
|
||||
#define DRM_IOCTL_VC4_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
|
||||
#define DRM_IOCTL_VC4_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)
|
||||
|
||||
struct drm_vc4_submit_rcl_surface {
|
||||
__u32 hindex; /* Handle index, or ~0 if not present. */
|
||||
__u32 offset; /* Offset to start of buffer. */
|
||||
/*
|
||||
* Bits for either render config (color_write) or load/store packet.
|
||||
* Bits should all be 0 for MSAA load/stores.
|
||||
*/
|
||||
__u16 bits;
|
||||
|
||||
#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0)
|
||||
__u16 flags;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
|
||||
* engine.
|
||||
*
|
||||
* Drivers typically use GPU BOs to store batchbuffers / command lists and
|
||||
* their associated state. However, because the VC4 lacks an MMU, we have to
|
||||
* do validation of memory accesses by the GPU commands. If we were to store
|
||||
* our commands in BOs, we'd need to do uncached readback from them to do the
|
||||
* validation process, which is too expensive. Instead, userspace accumulates
|
||||
* commands and associated state in plain memory, then the kernel copies the
|
||||
* data to its own address space, and then validates and stores it in a GPU
|
||||
* BO.
|
||||
*/
|
||||
struct drm_vc4_submit_cl {
|
||||
/* Pointer to the binner command list.
|
||||
*
|
||||
* This is the first set of commands executed, which runs the
|
||||
* coordinate shader to determine where primitives land on the screen,
|
||||
* then writes out the state updates and draw calls necessary per tile
|
||||
* to the tile allocation BO.
|
||||
*/
|
||||
__u64 bin_cl;
|
||||
|
||||
/* Pointer to the shader records.
|
||||
*
|
||||
* Shader records are the structures read by the hardware that contain
|
||||
* pointers to uniforms, shaders, and vertex attributes. The
|
||||
* reference to the shader record has enough information to determine
|
||||
* how many pointers are necessary (fixed number for shaders/uniforms,
|
||||
* and an attribute count), so those BO indices into bo_handles are
|
||||
* just stored as __u32s before each shader record passed in.
|
||||
*/
|
||||
__u64 shader_rec;
|
||||
|
||||
/* Pointer to uniform data and texture handles for the textures
|
||||
* referenced by the shader.
|
||||
*
|
||||
* For each shader state record, there is a set of uniform data in the
|
||||
* order referenced by the record (FS, VS, then CS). Each set of
|
||||
* uniform data has a __u32 index into bo_handles per texture
|
||||
* sample operation, in the order the QPU_W_TMUn_S writes appear in
|
||||
* the program. Following the texture BO handle indices is the actual
|
||||
* uniform data.
|
||||
*
|
||||
* The individual uniform state blocks don't have sizes passed in,
|
||||
* because the kernel has to determine the sizes anyway during shader
|
||||
* code validation.
|
||||
*/
|
||||
__u64 uniforms;
|
||||
__u64 bo_handles;
|
||||
|
||||
/* Size in bytes of the binner command list. */
|
||||
__u32 bin_cl_size;
|
||||
/* Size in bytes of the set of shader records. */
|
||||
__u32 shader_rec_size;
|
||||
/* Number of shader records.
|
||||
*
|
||||
* This could just be computed from the contents of shader_records and
|
||||
* the address bits of references to them from the bin CL, but it
|
||||
* keeps the kernel from having to resize some allocations it makes.
|
||||
*/
|
||||
__u32 shader_rec_count;
|
||||
/* Size in bytes of the uniform state. */
|
||||
__u32 uniforms_size;
|
||||
|
||||
/* Number of BO handles passed in (size is that times 4). */
|
||||
__u32 bo_handle_count;
|
||||
|
||||
/* RCL setup: */
|
||||
__u16 width;
|
||||
__u16 height;
|
||||
__u8 min_x_tile;
|
||||
__u8 min_y_tile;
|
||||
__u8 max_x_tile;
|
||||
__u8 max_y_tile;
|
||||
struct drm_vc4_submit_rcl_surface color_read;
|
||||
struct drm_vc4_submit_rcl_surface color_write;
|
||||
struct drm_vc4_submit_rcl_surface zs_read;
|
||||
struct drm_vc4_submit_rcl_surface zs_write;
|
||||
struct drm_vc4_submit_rcl_surface msaa_color_write;
|
||||
struct drm_vc4_submit_rcl_surface msaa_zs_write;
|
||||
__u32 clear_color[2];
|
||||
__u32 clear_z;
|
||||
__u8 clear_s;
|
||||
|
||||
__u32 pad:24;
|
||||
|
||||
#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
|
||||
/* By default, the kernel gets to choose the order that the tiles are
|
||||
* rendered in. If this is set, then the tiles will be rendered in a
|
||||
* raster order, with the right-to-left vs left-to-right and
|
||||
* top-to-bottom vs bottom-to-top dictated by
|
||||
* VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
|
||||
* blits to be implemented using the 3D engine.
|
||||
*/
|
||||
#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1)
|
||||
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2)
|
||||
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3)
|
||||
__u32 flags;
|
||||
|
||||
/* Returned value of the seqno of this render job (for the
|
||||
* wait ioctl).
|
||||
*/
|
||||
__u64 seqno;
|
||||
|
||||
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
|
||||
__u32 perfmonid;
|
||||
|
||||
/* Syncobj handle to wait on. If set, processing of this render job
|
||||
* will not start until the syncobj is signaled. 0 means ignore.
|
||||
*/
|
||||
__u32 in_sync;
|
||||
|
||||
/* Syncobj handle to export fence to. If set, the fence in the syncobj
|
||||
* will be replaced with a fence that signals upon completion of this
|
||||
* render job. 0 means ignore.
|
||||
*/
|
||||
__u32 out_sync;
|
||||
|
||||
__u32 pad2;
|
||||
};
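
/*
 * Example usage (illustrative sketch, not part of the upstream header): how
 * the pointer/size/count fields above relate to each other when filling in a
 * submission.  The bin CL, shader records, uniform data and BO handle array
 * are assumed to have been encoded already as described in the comments
 * above; DRM_IOCTL_VC4_SUBMIT_CL is the request macro defined earlier in this
 * header.  RCL surfaces (color_write etc.), clear values and flags are
 * omitted for brevity, and 64x64-pixel tiles are assumed.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int vc4_submit_example(int fd,
			      const void *bin_cl, __u32 bin_cl_size,
			      const void *shader_rec, __u32 shader_rec_size,
			      __u32 shader_rec_count,
			      const void *uniforms, __u32 uniforms_size,
			      const __u32 *bo_handles, __u32 bo_handle_count,
			      __u16 width, __u16 height,
			      __u64 *seqno_out)
{
	struct drm_vc4_submit_cl submit;

	memset(&submit, 0, sizeof(submit));
	submit.bin_cl = (__u64)(uintptr_t)bin_cl;
	submit.bin_cl_size = bin_cl_size;
	submit.shader_rec = (__u64)(uintptr_t)shader_rec;
	submit.shader_rec_size = shader_rec_size;
	submit.shader_rec_count = shader_rec_count;
	submit.uniforms = (__u64)(uintptr_t)uniforms;
	submit.uniforms_size = uniforms_size;
	submit.bo_handles = (__u64)(uintptr_t)bo_handles;
	submit.bo_handle_count = bo_handle_count;
	submit.width = width;
	submit.height = height;
	/* Tile bounds covering the whole surface (min_*_tile stay 0). */
	submit.max_x_tile = (width + 63) / 64 - 1;
	submit.max_y_tile = (height + 63) / 64 - 1;

	if (ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit) != 0)
		return -1;

	*seqno_out = submit.seqno;	/* for DRM_IOCTL_VC4_WAIT_SEQNO */
	return 0;
}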
|
||||
|
||||
/**
|
||||
* struct drm_vc4_wait_seqno - ioctl argument for waiting for
|
||||
* DRM_VC4_SUBMIT_CL completion using its returned seqno.
|
||||
*
|
||||
* timeout_ns is the timeout in nanoseconds, where "0" means "don't
|
||||
* block, just return the status."
|
||||
*/
|
||||
struct drm_vc4_wait_seqno {
|
||||
__u64 seqno;
|
||||
__u64 timeout_ns;
|
||||
};
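
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * blocking on a submission using the seqno returned in
 * drm_vc4_submit_cl.seqno.  DRM_IOCTL_VC4_WAIT_SEQNO is assumed to be the
 * request macro defined earlier in this header.
 */
#include <sys/ioctl.h>

static int vc4_wait_seqno_example(int fd, __u64 seqno, __u64 timeout_ns)
{
	struct drm_vc4_wait_seqno wait = {
		.seqno = seqno,
		.timeout_ns = timeout_ns,	/* 0 = just report the status */
	};

	/* Returns 0 once the job has completed, or an error otherwise. */
	return ioctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
}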
|
||||
|
||||
/**
|
||||
* struct drm_vc4_wait_bo - ioctl argument for waiting for
|
||||
* completion of the last DRM_VC4_SUBMIT_CL on a BO.
|
||||
*
|
||||
* This is useful for cases where multiple processes might be
|
||||
* rendering to a BO and you want to wait for all rendering to be
|
||||
* completed.
|
||||
*/
|
||||
struct drm_vc4_wait_bo {
|
||||
__u32 handle;
|
||||
__u32 pad;
|
||||
__u64 timeout_ns;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
|
||||
*
|
||||
* There are currently no values for the flags argument, but it may be
|
||||
* used in a future extension.
|
||||
*/
|
||||
struct drm_vc4_create_bo {
|
||||
__u32 size;
|
||||
__u32 flags;
|
||||
/** Returned GEM handle for the BO. */
|
||||
__u32 handle;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
|
||||
*
|
||||
* This doesn't actually perform an mmap. Instead, it returns the
|
||||
* offset you need to use in an mmap on the DRM device node. This
|
||||
* means that tools like valgrind end up knowing about the mapped
|
||||
* memory.
|
||||
*
|
||||
* There are currently no values for the flags argument, but it may be
|
||||
* used in a future extension.
|
||||
*/
|
||||
struct drm_vc4_mmap_bo {
|
||||
/** Handle for the object being mapped. */
|
||||
__u32 handle;
|
||||
__u32 flags;
|
||||
/** offset into the drm node to use for subsequent mmap call. */
|
||||
__u64 offset;
|
||||
};
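
/*
 * Example usage (illustrative sketch, not part of the upstream header): the
 * two-step mapping flow described above -- create a BO, ask the kernel for
 * its fake mmap offset, then mmap the DRM device node at that offset.
 * Assumes DRM_IOCTL_VC4_CREATE_BO and DRM_IOCTL_VC4_MMAP_BO, defined earlier
 * in this header; error unwinding is omitted for brevity.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *vc4_create_and_map_bo(int fd, __u32 size, __u32 *handle_out)
{
	struct drm_vc4_create_bo create;
	struct drm_vc4_mmap_bo map;
	void *ptr;

	memset(&create, 0, sizeof(create));
	create.size = size;
	if (ioctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create) != 0)
		return NULL;

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map) != 0)
		return NULL;

	/* The returned offset is only meaningful for mmap() on this DRM fd. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		   map.offset);
	if (ptr == MAP_FAILED)
		return NULL;

	*handle_out = create.handle;
	return ptr;
}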
|
||||
|
||||
/**
|
||||
* struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
|
||||
* shader BOs.
|
||||
*
|
||||
* Since allowing a shader to be overwritten while it's also being
|
||||
 * executed from would allow privilege escalation, shaders must be
|
||||
* created using this ioctl, and they can't be mmapped later.
|
||||
*/
|
||||
struct drm_vc4_create_shader_bo {
|
||||
/* Size of the data argument. */
|
||||
__u32 size;
|
||||
/* Flags, currently must be 0. */
|
||||
__u32 flags;
|
||||
|
||||
/* Pointer to the data. */
|
||||
__u64 data;
|
||||
|
||||
/** Returned GEM handle for the BO. */
|
||||
__u32 handle;
|
||||
/* Pad, must be 0. */
|
||||
__u32 pad;
|
||||
};
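
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * uploading validated shader code.  The QPU instruction blob is assumed to
 * have been assembled elsewhere; DRM_IOCTL_VC4_CREATE_SHADER_BO is the
 * request macro defined earlier in this header.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int vc4_create_shader_example(int fd, const void *qpu_code,
				     __u32 code_size, __u32 *handle_out)
{
	struct drm_vc4_create_shader_bo create;

	memset(&create, 0, sizeof(create));
	create.size = code_size;		/* size of the data argument */
	create.data = (__u64)(uintptr_t)qpu_code;

	if (ioctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &create) != 0)
		return -1;

	*handle_out = create.handle;	/* cannot be mmapped later */
	return 0;
}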
|
||||
|
||||
struct drm_vc4_get_hang_state_bo {
|
||||
__u32 handle;
|
||||
__u32 paddr;
|
||||
__u32 size;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct drm_vc4_hang_state - ioctl argument for collecting state
|
||||
* from a GPU hang for analysis.
|
||||
*/
|
||||
struct drm_vc4_get_hang_state {
|
||||
/** Pointer to array of struct drm_vc4_get_hang_state_bo. */
|
||||
__u64 bo;
|
||||
/**
|
||||
* On input, the size of the bo array. Output is the number
|
||||
* of bos to be returned.
|
||||
*/
|
||||
__u32 bo_count;
|
||||
|
||||
__u32 start_bin, start_render;
|
||||
|
||||
__u32 ct0ca, ct0ea;
|
||||
__u32 ct1ca, ct1ea;
|
||||
__u32 ct0cs, ct1cs;
|
||||
__u32 ct0ra0, ct1ra0;
|
||||
|
||||
__u32 bpca, bpcs;
|
||||
__u32 bpoa, bpos;
|
||||
|
||||
__u32 vpmbase;
|
||||
|
||||
__u32 dbge;
|
||||
__u32 fdbgo;
|
||||
__u32 fdbgb;
|
||||
__u32 fdbgr;
|
||||
__u32 fdbgs;
|
||||
__u32 errstat;
|
||||
|
||||
/* Pad that we may save more registers into in the future. */
|
||||
__u32 pad[16];
|
||||
};
|
||||
|
||||
#define DRM_VC4_PARAM_V3D_IDENT0 0
|
||||
#define DRM_VC4_PARAM_V3D_IDENT1 1
|
||||
#define DRM_VC4_PARAM_V3D_IDENT2 2
|
||||
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
|
||||
#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
|
||||
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
|
||||
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
|
||||
#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
|
||||
#define DRM_VC4_PARAM_SUPPORTS_PERFMON 8
|
||||
|
||||
struct drm_vc4_get_param {
|
||||
__u32 param;
|
||||
__u32 pad;
|
||||
__u64 value;
|
||||
};
|
||||
|
||||
struct drm_vc4_get_tiling {
|
||||
__u32 handle;
|
||||
__u32 flags;
|
||||
__u64 modifier;
|
||||
};
|
||||
|
||||
struct drm_vc4_set_tiling {
|
||||
__u32 handle;
|
||||
__u32 flags;
|
||||
__u64 modifier;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
|
||||
*/
|
||||
struct drm_vc4_label_bo {
|
||||
__u32 handle;
|
||||
__u32 len;
|
||||
__u64 name;
|
||||
};
|
||||
|
||||
/*
|
||||
* States prefixed with '__' are internal states and cannot be passed to the
|
||||
* DRM_IOCTL_VC4_GEM_MADVISE ioctl.
|
||||
*/
|
||||
#define VC4_MADV_WILLNEED 0
|
||||
#define VC4_MADV_DONTNEED 1
|
||||
#define __VC4_MADV_PURGED 2
|
||||
#define __VC4_MADV_NOTSUPP 3
|
||||
|
||||
struct drm_vc4_gem_madvise {
|
||||
__u32 handle;
|
||||
__u32 madv;
|
||||
__u32 retained;
|
||||
__u32 pad;
|
||||
};
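
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * marking an idle BO as purgeable and reclaiming it later.  If the kernel
 * purged the BO in the meantime, "retained" comes back as 0 and the contents
 * must be regenerated.  Assumes DRM_IOCTL_VC4_GEM_MADVISE, defined earlier in
 * this header.
 */
#include <string.h>
#include <sys/ioctl.h>

static int vc4_madvise_example(int fd, __u32 handle, __u32 madv,
			       __u32 *retained_out)
{
	struct drm_vc4_gem_madvise arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.madv = madv;	/* VC4_MADV_WILLNEED or VC4_MADV_DONTNEED */

	if (ioctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg) != 0)
		return -1;

	*retained_out = arg.retained;
	return 0;
}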
|
||||
|
||||
enum {
|
||||
VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
|
||||
VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
|
||||
VC4_PERFCNT_FEP_CLIPPED_QUADS,
|
||||
VC4_PERFCNT_FEP_VALID_QUADS,
|
||||
VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
|
||||
VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
|
||||
VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
|
||||
VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
|
||||
VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
|
||||
VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
|
||||
VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
|
||||
VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
|
||||
VC4_PERFCNT_PSE_PRIMS_REVERSED,
|
||||
VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
|
||||
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
|
||||
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
|
||||
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
|
||||
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
|
||||
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
|
||||
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
|
||||
VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
|
||||
VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
|
||||
VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
|
||||
VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
|
||||
VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
|
||||
VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
|
||||
VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
|
||||
VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
|
||||
VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
|
||||
VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
|
||||
VC4_PERFCNT_NUM_EVENTS,
|
||||
};
|
||||
|
||||
#define DRM_VC4_MAX_PERF_COUNTERS 16
|
||||
|
||||
struct drm_vc4_perfmon_create {
|
||||
__u32 id;
|
||||
__u32 ncounters;
|
||||
__u8 events[DRM_VC4_MAX_PERF_COUNTERS];
|
||||
};
|
||||
|
||||
struct drm_vc4_perfmon_destroy {
|
||||
__u32 id;
|
||||
};
|
||||
|
||||
/*
|
||||
* Returns the values of the performance counters tracked by this
|
||||
* perfmon (as an array of ncounters u64 values).
|
||||
*
|
||||
* No implicit synchronization is performed, so the user has to
|
||||
* guarantee that any jobs using this perfmon have already been
|
||||
* completed (probably by blocking on the seqno returned by the
|
||||
* last exec that used the perfmon).
|
||||
*/
|
||||
struct drm_vc4_perfmon_get_values {
|
||||
__u32 id;
|
||||
__u64 values_ptr;
|
||||
};
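
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * creating a perfmon for two counters, attaching it to submissions via
 * drm_vc4_submit_cl.perfmonid, and reading the accumulated values once every
 * job that used it is known to have finished.  Assumes the
 * DRM_IOCTL_VC4_PERFMON_* request macros defined earlier in this header.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int vc4_perfmon_create_example(int fd, __u32 *id_out)
{
	struct drm_vc4_perfmon_create create;

	memset(&create, 0, sizeof(create));
	create.ncounters = 2;
	create.events[0] = VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES;
	create.events[1] = VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING;

	if (ioctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create) != 0)
		return -1;

	*id_out = create.id;	/* pass this as drm_vc4_submit_cl.perfmonid */
	return 0;
}

static int vc4_perfmon_read_example(int fd, __u32 id, __u64 values[2])
{
	struct drm_vc4_perfmon_get_values get = {
		.id = id,
		.values_ptr = (__u64)(uintptr_t)values,
	};

	/* Caller must have waited for every job that used this perfmon. */
	return ioctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);
}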
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _VC4_DRM_H_ */
|
62
drm/vgem_drm.h
Normal file
62
drm/vgem_drm.h
Normal file
@ -0,0 +1,62 @@
/*
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _VGEM_DRM_H_
#define _VGEM_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */
#define DRM_VGEM_FENCE_ATTACH	0x1
#define DRM_VGEM_FENCE_SIGNAL	0x2

#define DRM_IOCTL_VGEM_FENCE_ATTACH	DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
#define DRM_IOCTL_VGEM_FENCE_SIGNAL	DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)

struct drm_vgem_fence_attach {
	__u32 handle;
	__u32 flags;
#define VGEM_FENCE_WRITE	0x1
	__u32 out_fence;
	__u32 pad;
};

struct drm_vgem_fence_signal {
	__u32 fence;
	__u32 flags;
};
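
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * attaching a write fence to a vgem BO before handing it to another device,
 * then signalling the fence when the CPU-side work is done.  Uses the
 * DRM_IOCTL_VGEM_FENCE_ATTACH/SIGNAL requests defined above; the BO handle is
 * assumed to come from a dumb-buffer allocation or PRIME import on the vgem
 * node.
 */
#include <string.h>
#include <sys/ioctl.h>

static int vgem_fence_example(int fd, __u32 bo_handle)
{
	struct drm_vgem_fence_attach attach;
	struct drm_vgem_fence_signal signal;

	memset(&attach, 0, sizeof(attach));
	attach.handle = bo_handle;
	attach.flags = VGEM_FENCE_WRITE;	/* fence covers CPU writes */
	if (ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach) != 0)
		return -1;

	/* ... fill the BO here ... */

	memset(&signal, 0, sizeof(signal));
	signal.fence = attach.out_fence;
	return ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
}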

#if defined(__cplusplus)
}
#endif

#endif /* _VGEM_DRM_H_ */
282
drm/via_drm.h
Normal file
282
drm/via_drm.h
Normal file
@ -0,0 +1,282 @@
|
||||
/*
|
||||
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
|
||||
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sub license,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#ifndef _VIA_DRM_H_
|
||||
#define _VIA_DRM_H_
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* WARNING: These defines must be the same as what the Xserver uses.
|
||||
 * If you change them, you must change the defines in the Xserver.
|
||||
*/
|
||||
|
||||
#ifndef _VIA_DEFINES_
|
||||
#define _VIA_DEFINES_
|
||||
|
||||
|
||||
#define VIA_NR_SAREA_CLIPRECTS 8
|
||||
#define VIA_NR_XVMC_PORTS 10
|
||||
#define VIA_NR_XVMC_LOCKS 5
|
||||
#define VIA_MAX_CACHELINE_SIZE 64
|
||||
#define XVMCLOCKPTR(saPriv,lockNo) \
|
||||
((__volatile__ struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
|
||||
(VIA_MAX_CACHELINE_SIZE - 1)) & \
|
||||
~(VIA_MAX_CACHELINE_SIZE - 1)) + \
|
||||
VIA_MAX_CACHELINE_SIZE*(lockNo)))
|
||||
|
||||
/* Each region is a minimum of 64k, and there are at most 64 of them.
|
||||
*/
|
||||
#define VIA_NR_TEX_REGIONS 64
|
||||
#define VIA_LOG_MIN_TEX_REGION_SIZE 16
|
||||
#endif
|
||||
|
||||
#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
|
||||
#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
|
||||
#define VIA_UPLOAD_CTX 0x4
|
||||
#define VIA_UPLOAD_BUFFERS 0x8
|
||||
#define VIA_UPLOAD_TEX0 0x10
|
||||
#define VIA_UPLOAD_TEX1 0x20
|
||||
#define VIA_UPLOAD_CLIPRECTS 0x40
|
||||
#define VIA_UPLOAD_ALL 0xff
|
||||
|
||||
/* VIA specific ioctls */
|
||||
#define DRM_VIA_ALLOCMEM 0x00
|
||||
#define DRM_VIA_FREEMEM 0x01
|
||||
#define DRM_VIA_AGP_INIT 0x02
|
||||
#define DRM_VIA_FB_INIT 0x03
|
||||
#define DRM_VIA_MAP_INIT 0x04
|
||||
#define DRM_VIA_DEC_FUTEX 0x05
|
||||
#define NOT_USED
|
||||
#define DRM_VIA_DMA_INIT 0x07
|
||||
#define DRM_VIA_CMDBUFFER 0x08
|
||||
#define DRM_VIA_FLUSH 0x09
|
||||
#define DRM_VIA_PCICMD 0x0a
|
||||
#define DRM_VIA_CMDBUF_SIZE 0x0b
|
||||
#define NOT_USED
|
||||
#define DRM_VIA_WAIT_IRQ 0x0d
|
||||
#define DRM_VIA_DMA_BLIT 0x0e
|
||||
#define DRM_VIA_BLIT_SYNC 0x0f
|
||||
|
||||
#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
|
||||
#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
|
||||
#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
|
||||
#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
|
||||
#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
|
||||
#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
|
||||
#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
|
||||
#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
|
||||
#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
|
||||
#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
|
||||
#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
|
||||
drm_via_cmdbuf_size_t)
|
||||
#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
|
||||
#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
|
||||
#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
|
||||
|
||||
/* Indices into buf.Setup where various bits of state are mirrored per
|
||||
* context and per buffer. These can be fired at the card as a unit,
|
||||
* or in a piecewise fashion as required.
|
||||
*/
|
||||
|
||||
#define VIA_TEX_SETUP_SIZE 8
|
||||
|
||||
/* Flags for clear ioctl
|
||||
*/
|
||||
#define VIA_FRONT 0x1
|
||||
#define VIA_BACK 0x2
|
||||
#define VIA_DEPTH 0x4
|
||||
#define VIA_STENCIL 0x8
|
||||
#define VIA_MEM_VIDEO 0 /* matches drm constant */
|
||||
#define VIA_MEM_AGP 1 /* matches drm constant */
|
||||
#define VIA_MEM_SYSTEM 2
|
||||
#define VIA_MEM_MIXED 3
|
||||
#define VIA_MEM_UNKNOWN 4
|
||||
|
||||
typedef struct {
|
||||
__u32 offset;
|
||||
__u32 size;
|
||||
} drm_via_agp_t;
|
||||
|
||||
typedef struct {
|
||||
__u32 offset;
|
||||
__u32 size;
|
||||
} drm_via_fb_t;
|
||||
|
||||
typedef struct {
|
||||
__u32 context;
|
||||
__u32 type;
|
||||
__u32 size;
|
||||
unsigned long index;
|
||||
unsigned long offset;
|
||||
} drm_via_mem_t;
|
||||
|
||||
typedef struct _drm_via_init {
|
||||
enum {
|
||||
VIA_INIT_MAP = 0x01,
|
||||
VIA_CLEANUP_MAP = 0x02
|
||||
} func;
|
||||
|
||||
unsigned long sarea_priv_offset;
|
||||
unsigned long fb_offset;
|
||||
unsigned long mmio_offset;
|
||||
unsigned long agpAddr;
|
||||
} drm_via_init_t;
|
||||
|
||||
typedef struct _drm_via_futex {
|
||||
enum {
|
||||
VIA_FUTEX_WAIT = 0x00,
|
||||
		VIA_FUTEX_WAKE = 0x01
|
||||
} func;
|
||||
__u32 ms;
|
||||
__u32 lock;
|
||||
__u32 val;
|
||||
} drm_via_futex_t;
|
||||
|
||||
typedef struct _drm_via_dma_init {
|
||||
enum {
|
||||
VIA_INIT_DMA = 0x01,
|
||||
VIA_CLEANUP_DMA = 0x02,
|
||||
VIA_DMA_INITIALIZED = 0x03
|
||||
} func;
|
||||
|
||||
unsigned long offset;
|
||||
unsigned long size;
|
||||
unsigned long reg_pause_addr;
|
||||
} drm_via_dma_init_t;
|
||||
|
||||
typedef struct _drm_via_cmdbuffer {
|
||||
char *buf;
|
||||
unsigned long size;
|
||||
} drm_via_cmdbuffer_t;
|
||||
|
||||
/* Warning: If you change the SAREA structure you must change the Xserver
|
||||
* structure as well */
|
||||
|
||||
typedef struct _drm_via_tex_region {
|
||||
unsigned char next, prev; /* indices to form a circular LRU */
|
||||
unsigned char inUse; /* owned by a client, or free? */
|
||||
int age; /* tracked by clients to update local LRU's */
|
||||
} drm_via_tex_region_t;
|
||||
|
||||
typedef struct _drm_via_sarea {
|
||||
unsigned int dirty;
|
||||
unsigned int nbox;
|
||||
struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
|
||||
drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
|
||||
int texAge; /* last time texture was uploaded */
|
||||
int ctxOwner; /* last context to upload state */
|
||||
int vertexPrim;
|
||||
|
||||
/*
|
||||
* Below is for XvMC.
|
||||
* We want the lock integers alone on, and aligned to, a cache line.
|
||||
* Therefore this somewhat strange construct.
|
||||
*/
|
||||
|
||||
char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
|
||||
|
||||
unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
|
||||
unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
|
||||
unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
|
||||
|
||||
/* Used by the 3d driver only at this point, for pageflipping:
|
||||
*/
|
||||
unsigned int pfCurrentOffset;
|
||||
} drm_via_sarea_t;
|
||||
|
||||
typedef struct _drm_via_cmdbuf_size {
|
||||
enum {
|
||||
VIA_CMDBUF_SPACE = 0x01,
|
||||
VIA_CMDBUF_LAG = 0x02
|
||||
} func;
|
||||
int wait;
|
||||
__u32 size;
|
||||
} drm_via_cmdbuf_size_t;
|
||||
|
||||
typedef enum {
|
||||
VIA_IRQ_ABSOLUTE = 0x0,
|
||||
VIA_IRQ_RELATIVE = 0x1,
|
||||
VIA_IRQ_SIGNAL = 0x10000000,
|
||||
VIA_IRQ_FORCE_SEQUENCE = 0x20000000
|
||||
} via_irq_seq_type_t;
|
||||
|
||||
#define VIA_IRQ_FLAGS_MASK 0xF0000000
|
||||
|
||||
enum drm_via_irqs {
|
||||
drm_via_irq_hqv0 = 0,
|
||||
drm_via_irq_hqv1,
|
||||
drm_via_irq_dma0_dd,
|
||||
drm_via_irq_dma0_td,
|
||||
drm_via_irq_dma1_dd,
|
||||
drm_via_irq_dma1_td,
|
||||
drm_via_irq_num
|
||||
};
|
||||
|
||||
struct drm_via_wait_irq_request {
|
||||
unsigned irq;
|
||||
via_irq_seq_type_t type;
|
||||
__u32 sequence;
|
||||
__u32 signal;
|
||||
};
|
||||
|
||||
typedef union drm_via_irqwait {
|
||||
struct drm_via_wait_irq_request request;
|
||||
struct drm_wait_vblank_reply reply;
|
||||
} drm_via_irqwait_t;
|
||||
|
||||
typedef struct drm_via_blitsync {
|
||||
__u32 sync_handle;
|
||||
unsigned engine;
|
||||
} drm_via_blitsync_t;
|
||||
|
||||
/* - * Below,"flags" is currently unused but will be used for possible future
|
||||
* extensions like kernel space bounce buffers for bad alignments and
|
||||
* blit engine busy-wait polling for better latency in the absence of
|
||||
* interrupts.
|
||||
*/
|
||||
|
||||
typedef struct drm_via_dmablit {
|
||||
__u32 num_lines;
|
||||
__u32 line_length;
|
||||
|
||||
__u32 fb_addr;
|
||||
__u32 fb_stride;
|
||||
|
||||
unsigned char *mem_addr;
|
||||
__u32 mem_stride;
|
||||
|
||||
__u32 flags;
|
||||
int to_fb;
|
||||
|
||||
drm_via_blitsync_t sync;
|
||||
} drm_via_dmablit_t;
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _VIA_DRM_H_ */
|
254
drm/virtgpu_drm.h
Normal file
254
drm/virtgpu_drm.h
Normal file
@ -0,0 +1,254 @@
|
||||
/*
|
||||
* Copyright 2013 Red Hat
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#ifndef VIRTGPU_DRM_H
|
||||
#define VIRTGPU_DRM_H
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Please note that modifications to all structs defined here are
|
||||
* subject to backwards-compatibility constraints.
|
||||
*
|
||||
* Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
|
||||
 * compatibility.  Keep fields aligned to their size.
|
||||
*/
|
||||
|
||||
#define DRM_VIRTGPU_MAP 0x01
|
||||
#define DRM_VIRTGPU_EXECBUFFER 0x02
|
||||
#define DRM_VIRTGPU_GETPARAM 0x03
|
||||
#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
|
||||
#define DRM_VIRTGPU_RESOURCE_INFO 0x05
|
||||
#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
|
||||
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
|
||||
#define DRM_VIRTGPU_WAIT 0x08
|
||||
#define DRM_VIRTGPU_GET_CAPS 0x09
|
||||
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
|
||||
#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
|
||||
|
||||
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
|
||||
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
|
||||
#define VIRTGPU_EXECBUF_RING_IDX 0x04
|
||||
#define VIRTGPU_EXECBUF_FLAGS (\
|
||||
VIRTGPU_EXECBUF_FENCE_FD_IN |\
|
||||
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
|
||||
VIRTGPU_EXECBUF_RING_IDX |\
|
||||
0)
|
||||
|
||||
struct drm_virtgpu_map {
|
||||
__u64 offset; /* use for mmap system call */
|
||||
__u32 handle;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
|
||||
struct drm_virtgpu_execbuffer {
|
||||
__u32 flags;
|
||||
__u32 size;
|
||||
__u64 command; /* void* */
|
||||
__u64 bo_handles;
|
||||
__u32 num_bo_handles;
|
||||
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
|
||||
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
|
||||
__u32 pad;
|
||||
};
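
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * submitting a command buffer and asking for an out-fence fd, as described by
 * the flag comments above.  The command stream encoding is host/renderer
 * specific and assumed to have been built already;
 * DRM_IOCTL_VIRTGPU_EXECBUFFER is the request macro defined later in this
 * header.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int virtgpu_execbuf_example(int fd, const void *cmds, __u32 cmd_size,
				   const __u32 *bo_handles, __u32 num_bos,
				   int *out_fence_fd)
{
	struct drm_virtgpu_execbuffer eb;

	memset(&eb, 0, sizeof(eb));
	eb.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT;
	eb.size = cmd_size;
	eb.command = (__u64)(uintptr_t)cmds;
	eb.bo_handles = (__u64)(uintptr_t)bo_handles;
	eb.num_bo_handles = num_bos;

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb) != 0)
		return -1;

	*out_fence_fd = eb.fence_fd;	/* valid only with FENCE_FD_OUT */
	return 0;
}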
|
||||
|
||||
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
|
||||
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
|
||||
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
|
||||
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
|
||||
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
|
||||
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
|
||||
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
|
||||
|
||||
struct drm_virtgpu_getparam {
|
||||
__u64 param;
|
||||
__u64 value;
|
||||
};
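
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * querying one of the parameters listed above, e.g. whether the host provides
 * 3D acceleration.  This sketch assumes the convention used by existing
 * userspace, where "value" carries a user pointer that the kernel writes the
 * result through; DRM_IOCTL_VIRTGPU_GETPARAM is defined later in this header.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int virtgpu_has_3d(int fd)
{
	__u64 result = 0;
	struct drm_virtgpu_getparam gp;

	memset(&gp, 0, sizeof(gp));
	gp.param = VIRTGPU_PARAM_3D_FEATURES;
	gp.value = (__u64)(uintptr_t)&result;	/* pointer to the result */

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) != 0)
		return 0;	/* treat failure as "no 3D" */

	return result != 0;
}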
|
||||
|
||||
/* NO_BO flags? NO resource flag? */
|
||||
/* resource flag for y_0_top */
|
||||
struct drm_virtgpu_resource_create {
|
||||
__u32 target;
|
||||
__u32 format;
|
||||
__u32 bind;
|
||||
__u32 width;
|
||||
__u32 height;
|
||||
__u32 depth;
|
||||
__u32 array_size;
|
||||
__u32 last_level;
|
||||
__u32 nr_samples;
|
||||
__u32 flags;
|
||||
__u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
|
||||
__u32 res_handle; /* returned by kernel */
|
||||
__u32 size; /* validate transfer in the host */
|
||||
__u32 stride; /* validate transfer in the host */
|
||||
};
|
||||
|
||||
struct drm_virtgpu_resource_info {
|
||||
__u32 bo_handle;
|
||||
__u32 res_handle;
|
||||
__u32 size;
|
||||
__u32 blob_mem;
|
||||
};
|
||||
|
||||
struct drm_virtgpu_3d_box {
|
||||
__u32 x;
|
||||
__u32 y;
|
||||
__u32 z;
|
||||
__u32 w;
|
||||
__u32 h;
|
||||
__u32 d;
|
||||
};
|
||||
|
||||
struct drm_virtgpu_3d_transfer_to_host {
|
||||
__u32 bo_handle;
|
||||
struct drm_virtgpu_3d_box box;
|
||||
__u32 level;
|
||||
__u32 offset;
|
||||
__u32 stride;
|
||||
__u32 layer_stride;
|
||||
};
|
||||
|
||||
struct drm_virtgpu_3d_transfer_from_host {
|
||||
__u32 bo_handle;
|
||||
struct drm_virtgpu_3d_box box;
|
||||
__u32 level;
|
||||
__u32 offset;
|
||||
__u32 stride;
|
||||
__u32 layer_stride;
|
||||
};
|
||||
|
||||
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
|
||||
struct drm_virtgpu_3d_wait {
|
||||
__u32 handle; /* 0 is an invalid handle */
|
||||
__u32 flags;
|
||||
};
|
||||
|
||||
struct drm_virtgpu_get_caps {
|
||||
__u32 cap_set_id;
|
||||
__u32 cap_set_ver;
|
||||
__u64 addr;
|
||||
__u32 size;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
struct drm_virtgpu_resource_create_blob {
|
||||
#define VIRTGPU_BLOB_MEM_GUEST 0x0001
|
||||
#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
|
||||
#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
|
||||
|
||||
#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
|
||||
#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
|
||||
#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
|
||||
/* zero is invalid blob_mem */
|
||||
__u32 blob_mem;
|
||||
__u32 blob_flags;
|
||||
__u32 bo_handle;
|
||||
__u32 res_handle;
|
||||
__u64 size;
|
||||
|
||||
/*
|
||||
* for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
|
||||
	 * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
|
||||
*/
|
||||
__u32 pad;
|
||||
__u32 cmd_size;
|
||||
__u64 cmd;
|
||||
__u64 blob_id;
|
||||
};
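
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * creating a mappable host blob resource and mapping it, using the
 * VIRTGPU_BLOB_* values above together with the
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB and DRM_IOCTL_VIRTGPU_MAP requests
 * defined later in this header.  The blob_id and the 3D context setup it
 * refers to are renderer specific and assumed to have been negotiated
 * already; error unwinding is omitted for brevity.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *virtgpu_map_host_blob(int fd, __u64 size, __u64 blob_id,
				   __u32 *bo_handle_out)
{
	struct drm_virtgpu_resource_create_blob blob;
	struct drm_virtgpu_map map;
	void *ptr;

	memset(&blob, 0, sizeof(blob));
	blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
	blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
	blob.size = size;
	blob.blob_id = blob_id;
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob) != 0)
		return NULL;

	memset(&map, 0, sizeof(map));
	map.handle = blob.bo_handle;
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map) != 0)
		return NULL;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		   map.offset);
	if (ptr == MAP_FAILED)
		return NULL;

	*bo_handle_out = blob.bo_handle;
	return ptr;
}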
|
||||
|
||||
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
|
||||
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
|
||||
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
|
||||
struct drm_virtgpu_context_set_param {
|
||||
__u64 param;
|
||||
__u64 value;
|
||||
};
|
||||
|
||||
struct drm_virtgpu_context_init {
|
||||
__u32 num_params;
|
||||
__u32 pad;
|
||||
|
||||
/* pointer to drm_virtgpu_context_set_param array */
|
||||
__u64 ctx_set_params;
|
||||
};
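
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * initialising a context with an explicit capability set and ring count
 * before the first execbuffer, using the VIRTGPU_CONTEXT_PARAM_* values above
 * and DRM_IOCTL_VIRTGPU_CONTEXT_INIT (defined later in this header).  Which
 * capset id to pass depends on the host renderer and is an assumption of the
 * example.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int virtgpu_context_init_example(int fd, __u64 capset_id)
{
	struct drm_virtgpu_context_set_param params[2];
	struct drm_virtgpu_context_init init;

	memset(params, 0, sizeof(params));
	params[0].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
	params[0].value = capset_id;
	params[1].param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS;
	params[1].value = 1;

	memset(&init, 0, sizeof(init));
	init.num_params = 2;
	init.ctx_set_params = (__u64)(uintptr_t)params;

	return ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}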
|
||||
|
||||
/*
|
||||
* Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
|
||||
* effect. The event size is sizeof(drm_event), since there is no additional
|
||||
* payload.
|
||||
*/
|
||||
#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_MAP \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
|
||||
struct drm_virtgpu_execbuffer)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_GETPARAM \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
|
||||
struct drm_virtgpu_getparam)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
|
||||
struct drm_virtgpu_resource_create)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
|
||||
struct drm_virtgpu_resource_info)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
|
||||
struct drm_virtgpu_3d_transfer_from_host)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
|
||||
struct drm_virtgpu_3d_transfer_to_host)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_WAIT \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
|
||||
struct drm_virtgpu_3d_wait)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_GET_CAPS \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
|
||||
struct drm_virtgpu_get_caps)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
|
||||
struct drm_virtgpu_resource_create_blob)
|
||||
|
||||
#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
|
||||
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
|
||||
struct drm_virtgpu_context_init)
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
1292
drm/vmwgfx_drm.h
Normal file
1292
drm/vmwgfx_drm.h
Normal file
File diff suppressed because it is too large
Load Diff