接前一篇文章:libdrm全解析十 —— 源码全解析(7)
本文参考了相关博文(原文链接从略),特此致谢!
本文继续对include/drm/drm.h中实际功能宏定义进行讲解。
12. DRM_IOCTL_GEM_OPEN
第12个宏是DRM_IOCTL_GEM_OPEN,相应代码如下:
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
结合之前文章中的_IOWR(type,nr,size)的最终定义,得到如下代码:
#define DRM_IOCTL_GEM_OPEN ( ((3) << 30) | (('d') << 8) | ((0x0b) << 0) | ((sizeof(struct drm_gem_open)) << 16) )
struct drm_gem_open在同文件(include/drm/drm.h)中定义,代码如下:
/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
/** Returned handle for the object */
__u32 handle;
/** Returned size of the object */
__u64 size;
};
drm_gem_open结构中的各成员的意义代码注释描述得很清楚了,在此无需赘述。
DRM_IOCTL_GEM_OPEN同样没有直接对应的Userspace API(即没有对应的封装函数),但libdrm源码中有多处直接对其进行调用,这些调用同样分布在各显卡驱动对应的源文件中。
- amdgpu/amdgpu_bo.c中:
drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
enum amdgpu_bo_handle_type type,
uint32_t shared_handle,
struct amdgpu_bo_import_result *output)
{
struct drm_gem_open open_arg = {};
struct amdgpu_bo *bo = NULL;
uint32_t handle = 0, flink_name = 0;
uint64_t alloc_size = 0;
int r = 0;
int dma_fd;
uint64_t dma_buf_size = 0;
……
/* Open the handle. */
switch (type) {
case amdgpu_bo_handle_type_gem_flink_name:
open_arg.name = shared_handle;
r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
if (r)
goto unlock;
flink_name = shared_handle;
handle = open_arg.handle;
alloc_size = open_arg.size;
if (dev->flink_fd != dev->fd) {
r = drmPrimeHandleToFD(dev->flink_fd, handle,
DRM_CLOEXEC, &dma_fd);
if (r)
goto free_bo_handle;
r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
close(dma_fd);
if (r)
goto free_bo_handle;
r = drmCloseBufferHandle(dev->flink_fd,
open_arg.handle);
if (r)
goto free_bo_handle;
}
open_arg.handle = 0;
break;
case amdgpu_bo_handle_type_dma_buf_fd:
handle = shared_handle;
alloc_size = dma_buf_size;
break;
case amdgpu_bo_handle_type_kms:
case amdgpu_bo_handle_type_kms_noimport:
assert(0); /* unreachable */
}
……
return r;
}
- etnaviv/etnaviv_bo.c中:
/* import a buffer object from DRI2 name */
drm_public struct etna_bo *etna_bo_from_name(struct etna_device *dev,
uint32_t name)
{
struct etna_bo *bo;
struct drm_gem_open req = {
.name = name,
};
pthread_mutex_lock(&table_lock);
/* check name table first, to see if bo is already open: */
bo = lookup_bo(dev->name_table, name);
if (bo)
goto out_unlock;
if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
ERROR_MSG("gem-open failed: %s", strerror(errno));
goto out_unlock;
}
bo = lookup_bo(dev->handle_table, req.handle);
if (bo)
goto out_unlock;
bo = bo_from_handle(dev, req.size, req.handle, 0);
if (bo)
set_name(bo, name);
out_unlock:
pthread_mutex_unlock(&table_lock);
return bo;
}
- exynos/exynos_drm.c中:
/*
* Get a exynos buffer object from a gem global object name.
*
* @dev: a exynos device object.
* @name: a gem global object name exported by another process.
*
* this interface is used to get a exynos buffer object from a gem
* global object name sent by another process for buffer sharing.
*
* if true, return a exynos buffer object else NULL.
*
*/
drm_public struct exynos_bo *
exynos_bo_from_name(struct exynos_device *dev, uint32_t name)
{
struct exynos_bo *bo;
struct drm_gem_open req = {
.name = name,
};
bo = calloc(sizeof(*bo), 1);
if (!bo) {
fprintf(stderr, "failed to allocate bo[%s].\n",
strerror(errno));
return NULL;
}
if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
fprintf(stderr, "failed to open gem object[%s].\n",
strerror(errno));
goto err_free_bo;
}
bo->dev = dev;
bo->name = name;
bo->handle = req.handle;
return bo;
err_free_bo:
free(bo);
return NULL;
}
- freedreno/freedreno_bo.c中:
drm_public struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
struct drm_gem_open req = {
.name = name,
};
struct fd_bo *bo;
pthread_mutex_lock(&table_lock);
/* check name table first, to see if bo is already open: */
bo = lookup_bo(dev->name_table, name);
if (bo)
goto out_unlock;
if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
ERROR_MSG("gem-open failed: %s", strerror(errno));
goto out_unlock;
}
bo = lookup_bo(dev->handle_table, req.handle);
if (bo)
goto out_unlock;
bo = bo_from_handle(dev, req.size, req.handle);
if (bo) {
set_name(bo, name);
VG_BO_ALLOC(bo);
}
out_unlock:
pthread_mutex_unlock(&table_lock);
return bo;
}
- intel/intel_bufmgr_gem.c中:
/**
* Returns a drm_intel_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
*/
drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
drm_intel_bo_gem *bo_gem;
int ret;
struct drm_gem_open open_arg;
……
memclear(open_arg);
open_arg.name = handle;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_GEM_OPEN,
&open_arg);
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
bo_gem = NULL;
goto out;
}
……
}
- nouveau/nouveau.c中:
drm_public int
nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
struct nouveau_bo **pbo)
{
struct nouveau_drm *drm = nouveau_drm(&dev->object);
struct nouveau_device_priv *nvdev = nouveau_device(dev);
struct nouveau_bo_priv *nvbo;
struct drm_gem_open req = { .name = name };
int ret;
pthread_mutex_lock(&nvdev->lock);
DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
if (nvbo->name == name) {
ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
pbo, name);
pthread_mutex_unlock(&nvdev->lock);
return ret;
}
}
ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &req);
if (ret == 0) {
ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);
}
pthread_mutex_unlock(&nvdev->lock);
return ret;
}
- omap/omap_drm.c中:
/* import a buffer object from DRI2 name */
drm_public struct omap_bo *
omap_bo_from_name(struct omap_device *dev, uint32_t name)
{
struct omap_bo *bo = NULL;
struct drm_gem_open req = {
.name = name,
};
pthread_mutex_lock(&table_lock);
if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
goto fail;
}
bo = lookup_bo(dev, req.handle);
if (!bo) {
bo = bo_from_handle(dev, req.handle);
bo->name = name;
}
pthread_mutex_unlock(&table_lock);
return bo;
fail:
pthread_mutex_unlock(&table_lock);
free(bo);
return NULL;
}
- radeon/radeon_bo_gem.c中:
radeon驱动同样通过drmIoctl直接调用了DRM_IOCTL_GEM_OPEN,相应代码与上述各驱动类似,此处从略。
- tegra/tegra.c中:
drm_public int
drm_tegra_bo_open(struct drm_tegra *drm, uint32_t name, uint32_t flags,
struct drm_tegra_bo **bop)
{
struct drm_gem_open args;
struct drm_tegra_bo *bo;
int err;
bo = drm_tegra_bo_alloc(drm, 0, flags, 0);
if (!bo)
return -ENOMEM;
memset(&args, 0, sizeof(args));
args.name = name;
err = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &args);
if (err < 0)
goto free;
bo->handle = args.handle;
bo->size = args.size;
*bop = bo;
return 0;
free:
free(bo);
return err;
}
类似于DRM_IOCTL_GEM_FLINK,每种显卡都会调用DRM_IOCTL_GEM_OPEN对应的drmIoctl。
其余宏定义将在后续文章中继续解析。