blob: 9ea146752ee4818b2e022376f765d562d1d8bc48 [file] [log] [blame]
Sean Paulda6270d2015-06-01 14:11:52 -04001/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Sean Paul0aee6b22016-05-10 04:08:10 -040017#define LOG_TAG "hwc-platform-nv"
Sean Paulda6270d2015-06-01 14:11:52 -040018
19#include "drmresources.h"
Sean Paul63769962016-04-21 16:25:06 -040020#include "platform.h"
Sean Paulea045b72016-04-21 16:39:02 -040021#include "platformnv.h"
Sean Paulda6270d2015-06-01 14:11:52 -040022
Sean Paul5325e102016-03-29 13:55:35 -040023#include <cinttypes>
Sean Paul419b5e02015-06-10 14:30:47 -040024#include <stdatomic.h>
Adrian Salidoee24aca2017-07-17 17:58:50 -070025#include <drm/drm_fourcc.h>
Sean Paulda6270d2015-06-01 14:11:52 -040026#include <xf86drm.h>
27#include <xf86drmMode.h>
28
29#include <cutils/log.h>
30#include <hardware/gralloc.h>
31
32namespace android {
33
34#ifdef USE_NVIDIA_IMPORTER
35// static
36Importer *Importer::CreateInstance(DrmResources *drm) {
37 NvImporter *importer = new NvImporter(drm);
38 if (!importer)
39 return NULL;
40
41 int ret = importer->Init();
42 if (ret) {
43 ALOGE("Failed to initialize the nv importer %d", ret);
44 delete importer;
45 return NULL;
46 }
47 return importer;
48}
49#endif
50
// Stores the DRM device handle; all fallible setup is deferred to Init().
NvImporter::NvImporter(DrmResources *drm) : drm_(drm) {
}
53
// Nothing owned directly by the importer itself; per-buffer resources are
// released via ReleaseBuffer()/ReleaseBufferImpl().
NvImporter::~NvImporter() {
}
56
57int NvImporter::Init() {
58 int ret = hw_get_module(GRALLOC_HARDWARE_MODULE_ID,
59 (const hw_module_t **)&gralloc_);
60 if (ret) {
61 ALOGE("Failed to open gralloc module %d", ret);
62 return ret;
63 }
64
65 if (strcasecmp(gralloc_->common.author, "NVIDIA"))
66 ALOGW("Using non-NVIDIA gralloc module: %s/%s\n", gralloc_->common.name,
67 gralloc_->common.author);
68
69 return 0;
70}
71
// Imports |handle| into a DRM framebuffer object, filling |bo|.
// Buffers already known to NV gralloc are reused via a reference count;
// otherwise a new NvBuffer_t is created, resolved into gem handles through
// the gralloc perform() hook, wrapped in an fb id, and registered with
// gralloc so we get a callback when gralloc drops its reference.
// Returns 0 on success or a negative errno / ioctl error on failure.
int NvImporter::ImportBuffer(buffer_handle_t handle, hwc_drm_bo_t *bo) {
  memset(bo, 0, sizeof(hwc_drm_bo_t));
  // Fast path: gralloc already holds an NvBuffer_t for this handle, so just
  // take another reference and hand back the cached bo.
  NvBuffer_t *buf = GrallocGetNvBuffer(handle);
  if (buf) {
    atomic_fetch_add(&buf->ref, 1);
    *bo = buf->bo;
    return 0;
  }

  buf = new NvBuffer_t();
  if (!buf) {
    ALOGE("Failed to allocate new NvBuffer_t");
    return -ENOMEM;
  }
  // Back-pointers so the static NvGrallocRelease() callback can recover the
  // buffer and its owning importer from the bo alone.
  buf->bo.priv = buf;
  buf->importer = this;

  // We initialize the reference count to 2 since NvGralloc is still using this
  // buffer (will be cleared in the NvGrallocRelease), and the other
  // reference is for HWC (this ImportBuffer call).
  atomic_init(&buf->ref, 2);

  // Resolve the gralloc handle into gem handles/pitches/offsets on our fd.
  int ret = gralloc_->perform(gralloc_, GRALLOC_MODULE_PERFORM_DRM_IMPORT,
                              drm_->fd(), handle, &buf->bo);
  if (ret) {
    ALOGE("GRALLOC_MODULE_PERFORM_DRM_IMPORT failed %d", ret);
    delete buf;
    return ret;
  }

  // Wrap the gem handles in a framebuffer object so the buffer can be
  // attached to a plane for scanout.
  ret = drmModeAddFB2(drm_->fd(), buf->bo.width, buf->bo.height, buf->bo.format,
                      buf->bo.gem_handles, buf->bo.pitches, buf->bo.offsets,
                      &buf->bo.fb_id, 0);
  if (ret) {
    ALOGE("Failed to add fb %d", ret);
    ReleaseBufferImpl(&buf->bo);
    delete buf;
    return ret;
  }

  // Register the free callback so gralloc's release of the buffer drops its
  // reference (the "2" initialized above).
  ret = GrallocSetNvBuffer(handle, buf);
  if (ret) {
    /* This will happen if persist.tegra.gpu_mapping_cache is 0/off,
     * or if NV gralloc runs out of "priv slots" (currently 3 per buffer,
     * only one of which should be used by drm_hwcomposer). */
    ALOGE("Failed to register free callback for imported buffer %d", ret);
    ReleaseBufferImpl(&buf->bo);
    delete buf;
    return ret;
  }
  *bo = buf->bo;
  return 0;
}
125
// Drops one reference to the buffer backing |bo|. The fb/gem resources are
// freed only when both HWC and NV gralloc have released their references
// (see the refcount initialization in ImportBuffer()).
// Always returns 0.
int NvImporter::ReleaseBuffer(hwc_drm_bo_t *bo) {
  NvBuffer_t *buf = (NvBuffer_t *)bo->priv;
  if (!buf) {
    ALOGE("Freeing bo %" PRIu32 ", buf is NULL!", bo->fb_id);
    return 0;
  }
  // atomic_fetch_sub() returns the previous value; > 1 means another
  // reference is still outstanding, so don't free yet.
  if (atomic_fetch_sub(&buf->ref, 1) > 1)
    return 0;

  ReleaseBufferImpl(bo);
  delete buf;
  return 0;
}
139
140// static
Sean Paul419b5e02015-06-10 14:30:47 -0400141void NvImporter::NvGrallocRelease(void *nv_buffer) {
142 NvBuffer_t *buf = (NvBuffer *)nv_buffer;
143 buf->importer->ReleaseBuffer(&buf->bo);
Sean Paulda6270d2015-06-01 14:11:52 -0400144}
145
// Frees the DRM-side resources of |bo|: removes the framebuffer object (if
// one was created) and closes each unique gem handle exactly once.
void NvImporter::ReleaseBufferImpl(hwc_drm_bo_t *bo) {
  if (bo->fb_id) {
    int ret = drmModeRmFB(drm_->fd(), bo->fb_id);
    if (ret)
      ALOGE("Failed to rm fb %d", ret);
  }

  struct drm_gem_close gem_close;
  memset(&gem_close, 0, sizeof(gem_close));
  int num_gem_handles = sizeof(bo->gem_handles) / sizeof(bo->gem_handles[0]);
  for (int i = 0; i < num_gem_handles; i++) {
    // Skip slots that are unused or were already cleared as duplicates.
    if (!bo->gem_handles[i])
      continue;

    gem_close.handle = bo->gem_handles[i];
    int ret = drmIoctl(drm_->fd(), DRM_IOCTL_GEM_CLOSE, &gem_close);
    if (ret) {
      ALOGE("Failed to close gem handle %d %d", i, ret);
    } else {
      /* Clear any duplicate gem handle as well but don't close again */
      // Multiple planes may share one gem handle; zero the duplicates so a
      // later iteration doesn't double-close an already-closed handle.
      for (int j = i + 1; j < num_gem_handles; j++)
        if (bo->gem_handles[j] == bo->gem_handles[i])
          bo->gem_handles[j] = 0;
      bo->gem_handles[i] = 0;
    }
  }
}
173
174NvImporter::NvBuffer_t *NvImporter::GrallocGetNvBuffer(buffer_handle_t handle) {
175 void *priv = NULL;
176 int ret =
177 gralloc_->perform(gralloc_, GRALLOC_MODULE_PERFORM_GET_IMPORTER_PRIVATE,
Sean Paul419b5e02015-06-10 14:30:47 -0400178 handle, NvGrallocRelease, &priv);
Sean Paulda6270d2015-06-01 14:11:52 -0400179 return ret ? NULL : (NvBuffer_t *)priv;
180}
181
182int NvImporter::GrallocSetNvBuffer(buffer_handle_t handle, NvBuffer_t *buf) {
183 return gralloc_->perform(gralloc_,
184 GRALLOC_MODULE_PERFORM_SET_IMPORTER_PRIVATE, handle,
Sean Paul419b5e02015-06-10 14:30:47 -0400185 NvGrallocRelease, buf);
Sean Paulda6270d2015-06-01 14:11:52 -0400186}
Sean Paul4c4646e2016-05-10 04:19:24 -0400187
188#ifdef USE_NVIDIA_IMPORTER
189// static
190std::unique_ptr<Planner> Planner::CreateInstance(DrmResources *) {
191 std::unique_ptr<Planner> planner(new Planner);
Adrian Salidoee24aca2017-07-17 17:58:50 -0700192 planner->AddStage<PlanStageNvLimits>();
Sean Paul4c4646e2016-05-10 04:19:24 -0400193 planner->AddStage<PlanStageProtectedRotated>();
194 planner->AddStage<PlanStageProtected>();
Adrian Salido45002322017-04-10 21:44:21 -0700195 planner->AddStage<PlanStagePrecomp>();
Sean Paul4c4646e2016-05-10 04:19:24 -0400196 planner->AddStage<PlanStageGreedy>();
197 return planner;
198}
199#endif
200
// Removes and returns the first plane in |planes| that supports |crtc|, or
// NULL if none does.
// NOTE(review): despite the name, this does not check plane->type(); it
// assumes the first crtc-compatible entry in the list is the primary plane —
// confirm against how callers order |planes|.
static DrmPlane *GetCrtcPrimaryPlane(DrmCrtc *crtc,
                                     std::vector<DrmPlane *> *planes) {
  for (auto i = planes->begin(); i != planes->end(); ++i) {
    if ((*i)->GetCrtcSupported(*crtc)) {
      DrmPlane *plane = *i;
      planes->erase(i);
      return plane;
    }
  }
  return NULL;
}
212
213int PlanStageProtectedRotated::ProvisionPlanes(
214 std::vector<DrmCompositionPlane> *composition,
215 std::map<size_t, DrmHwcLayer *> &layers, DrmCrtc *crtc,
216 std::vector<DrmPlane *> *planes) {
217 int ret;
218 int protected_zorder = -1;
219 for (auto i = layers.begin(); i != layers.end();) {
220 if (!i->second->protected_usage() || !i->second->transform) {
221 ++i;
222 continue;
223 }
224
225 auto primary_iter = planes->begin();
226 for (; primary_iter != planes->end(); ++primary_iter) {
227 if ((*primary_iter)->type() == DRM_PLANE_TYPE_PRIMARY)
228 break;
229 }
230
231 // We cheat a little here. Since there can only be one primary plane per
232 // crtc, we know we'll only hit this case once. So we blindly insert the
233 // protected content at the beginning of the composition, knowing this path
234 // won't be taken a second time during the loop.
235 if (primary_iter != planes->end()) {
236 composition->emplace(composition->begin(),
237 DrmCompositionPlane::Type::kLayer, *primary_iter,
238 crtc, i->first);
239 planes->erase(primary_iter);
240 protected_zorder = i->first;
241 } else {
242 ALOGE("Could not provision primary plane for protected/rotated layer");
243 }
244 i = layers.erase(i);
245 }
246
247 if (protected_zorder == -1)
248 return 0;
249
250 // Add any layers below the protected content to the precomposition since we
251 // need to punch a hole through them.
252 for (auto i = layers.begin(); i != layers.end();) {
253 // Skip layers above the z-order of the protected content
254 if (i->first > static_cast<size_t>(protected_zorder)) {
255 ++i;
256 continue;
257 }
258
259 // If there's no precomp layer already queued, queue one now.
260 DrmCompositionPlane *precomp = GetPrecomp(composition);
261 if (precomp) {
262 precomp->source_layers().emplace_back(i->first);
263 } else {
264 if (planes->size()) {
265 DrmPlane *precomp_plane = planes->back();
266 planes->pop_back();
267 composition->emplace_back(DrmCompositionPlane::Type::kPrecomp,
268 precomp_plane, crtc, i->first);
269 } else {
270 ALOGE("Not enough planes to reserve for precomp fb");
271 }
272 }
273 i = layers.erase(i);
274 }
275 return 0;
276}
Adrian Salidoee24aca2017-07-17 17:58:50 -0700277
278bool PlanStageNvLimits::CheckLayer(DrmHwcLayer *layer) {
279 auto src_w = layer->source_crop.width();
280 auto src_h = layer->source_crop.height();
281 auto dst_w = layer->display_frame.width();
282 auto dst_h = layer->display_frame.height();
283 int h_limit = 4;
284 int v_limit;
285
286 switch (layer->buffer->format) {
287 case DRM_FORMAT_YVU420:
288 case DRM_FORMAT_BGR565:
289 v_limit = 4;
290 break;
291 default:
292 v_limit = 2;
293 break;
294 }
295
296 if (layer->transform &
297 (DrmHwcTransform::kRotate90 | DrmHwcTransform::kRotate270))
298 std::swap(dst_w, dst_h);
299
300 // check for max supported down scaling
301 if (((src_w / dst_w) > h_limit) || ((src_h / dst_h) > v_limit))
302 return false;
303
304 return true;
305}
306
307int PlanStageNvLimits::ProvisionPlanes(
308 std::vector<DrmCompositionPlane> *composition,
309 std::map<size_t, DrmHwcLayer *> &layers, DrmCrtc *crtc,
310 std::vector<DrmPlane *> *planes) {
311 int ret;
312
313 for (auto i = layers.begin(); i != layers.end();) {
314 // Skip layer if supported
315 if (CheckLayer(i->second)) {
316 i++;
317 continue;
318 }
319
320 if (i->second->protected_usage()) {
321 // Drop the layer if unsupported and protected, this will just display
322 // black in the area of this layer but it's better than failing miserably
323 i = layers.erase(i);
324 continue;
325 }
326
327 // If there's no precomp layer already queued, queue one now.
328 DrmCompositionPlane *precomp = GetPrecomp(composition);
329 if (precomp) {
330 precomp->source_layers().emplace_back(i->first);
331 } else if (!planes->empty()) {
332 DrmPlane *precomp_plane = planes->back();
333 planes->pop_back();
334 composition->emplace_back(DrmCompositionPlane::Type::kPrecomp,
335 precomp_plane, crtc, i->first);
336 } else {
337 ALOGE("Not enough planes to reserve for precomp fb");
338 }
339 i = layers.erase(i);
340 }
341
342 return 0;
343}
Sean Paulda6270d2015-06-01 14:11:52 -0400344}