@@ -113,7 +113,7 @@ extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
static inline enum tiler_fmt gem2fmt(u32 flags)
{
- switch (flags & OMAP_BO_TILED) {
+ switch (flags & OMAP_BO_TILED_MASK) {
case OMAP_BO_TILED_8:
return TILFMT_8BIT;
case OMAP_BO_TILED_16:
@@ -76,7 +76,7 @@ static u32 get_linear_addr(struct drm_framebuffer *fb,
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -135,7 +135,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -193,7 +193,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
plane = &omap_fb->planes[1];
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK));
omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
@@ -194,7 +194,7 @@ static void omap_gem_evict(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
@@ -322,7 +322,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
size_t size = obj->size;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
/* for tiled buffers, the virtual size has stride rounded up
* to 4kb.. (to hide the fact that row n+1 might start 16kb or
* 32kb later!). But we don't back the entire buffer with
@@ -511,7 +511,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
* probably trigger put_pages()?
*/
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = omap_gem_fault_2d(obj, vma, vmf);
else
ret = omap_gem_fault_1d(obj, vma, vmf);
@@ -784,7 +784,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
if (ret)
goto fail;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt,
omap_obj->width,
omap_obj->height, 0);
@@ -890,7 +890,7 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
mutex_lock(&omap_obj->lock);
if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
- (omap_obj->flags & OMAP_BO_TILED)) {
+ (omap_obj->flags & OMAP_BO_TILED_MASK)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
@@ -905,7 +905,7 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
return ret;
}
@@ -1044,7 +1044,7 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
refcount_read(&omap_obj->dma_addr_cnt),
omap_obj->vaddr, omap_obj->roll);
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
if (omap_obj->block) {
struct tcm_area *area = &omap_obj->block->area;
@@ -1143,7 +1143,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
int ret;
/* Validate the flags and compute the memory and cache flags. */
- if (flags & OMAP_BO_TILED) {
+ if (flags & OMAP_BO_TILED_MASK) {
if (!priv->usergart) {
dev_err(dev->dev, "Tiled buffers require DMM\n");
return NULL;
@@ -1185,7 +1185,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->flags = flags;
mutex_init(&omap_obj->lock);
- if (flags & OMAP_BO_TILED) {
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* For tiled buffers align dimensions to slot boundaries and
* calculate size based on aligned dimensions.
@@ -64,7 +64,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
{
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
- if (omap_gem_flags(obj) & OMAP_BO_TILED) {
+ if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
/* TODO we would need to pin at least part of the buffer to
* get de-tiled view. For now just reject it.
*/
@@ -52,7 +52,6 @@ struct drm_omap_param {
#define OMAP_BO_TILED_16 0x00000200
#define OMAP_BO_TILED_32 0x00000300
#define OMAP_BO_TILED_MASK 0x00000f00
-#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
union omap_gem_size {
__u32 bytes; /* (for non-tiled formats) */
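The uapi values above are also why the OR'd define can simply be dropped: OMAP_BO_TILED_8/16/32 are enumerated values in a four-bit field (bits 8-11), not independent flag bits, so OR-ing them together only ever produced 0x300 and merely happened to match all three formats, whereas OMAP_BO_TILED_MASK names the whole field. A self-contained sketch of that arithmetic follows; the values are copied from the hunk above, while is_tiled() and the userspace main() are illustrative only and not part of the patch or the kernel.

/* Illustrative userspace sketch, not part of the patch: flag values are
 * copied from the uapi hunk above, is_tiled() is a hypothetical helper. */
#include <stdio.h>

#define OMAP_BO_TILED_8    0x00000100
#define OMAP_BO_TILED_16   0x00000200
#define OMAP_BO_TILED_32   0x00000300
#define OMAP_BO_TILED_MASK 0x00000f00

/* The removed define OR'd the three enumerated values, which is just 0x300. */
#define OLD_OMAP_BO_TILED  (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)

static int is_tiled(unsigned int flags)
{
	/* The tiled format lives in bits 8-11 as an enumeration, not as
	 * independent bits, so test the whole field with the mask. */
	return (flags & OMAP_BO_TILED_MASK) != 0;
}

int main(void)
{
	printf("OLD_OMAP_BO_TILED = 0x%x\n", OLD_OMAP_BO_TILED);      /* 0x300 */
	printf("tiled-16 with an unrelated flag bit: field 0x%x, tiled=%d\n",
	       (OMAP_BO_TILED_16 | 0x2) & OMAP_BO_TILED_MASK,         /* 0x200 */
	       is_tiled(OMAP_BO_TILED_16 | 0x2));                     /* 1 */
	printf("plain buffer: tiled=%d\n", is_tiled(0));              /* 0 */
	return 0;
}

If a further tiled format were ever added, it would take the next value in the field (0x400 and up); every "flags & OMAP_BO_TILED_MASK" test converted above would keep treating such a buffer as tiled without further changes, and only gem2fmt() would need a new case.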