blob: 084d4f031a1c5ed9d6a7c4a28853e03fd9ea9fa5 [file] [log] [blame]
Sean Paulda6270d2015-06-01 14:11:52 -04001/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Sean Paul0aee6b22016-05-10 04:08:10 -040017#define LOG_TAG "hwc-platform-nv"
Sean Paulda6270d2015-06-01 14:11:52 -040018
19#include "drmresources.h"
Sean Paul63769962016-04-21 16:25:06 -040020#include "platform.h"
Sean Paulea045b72016-04-21 16:39:02 -040021#include "platformnv.h"
Sean Paulda6270d2015-06-01 14:11:52 -040022
Sean Paul5325e102016-03-29 13:55:35 -040023#include <cinttypes>
Sean Paul419b5e02015-06-10 14:30:47 -040024#include <stdatomic.h>
Sean Paulda6270d2015-06-01 14:11:52 -040025#include <xf86drm.h>
26#include <xf86drmMode.h>
27
28#include <cutils/log.h>
29#include <hardware/gralloc.h>
30
31namespace android {
32
33#ifdef USE_NVIDIA_IMPORTER
34// static
35Importer *Importer::CreateInstance(DrmResources *drm) {
36 NvImporter *importer = new NvImporter(drm);
37 if (!importer)
38 return NULL;
39
40 int ret = importer->Init();
41 if (ret) {
42 ALOGE("Failed to initialize the nv importer %d", ret);
43 delete importer;
44 return NULL;
45 }
46 return importer;
47}
48#endif
49
// Stores the DrmResources handle; DRM ioctls issued later go through
// drm_->fd().
NvImporter::NvImporter(DrmResources *drm) : drm_(drm) {
}
52
// No explicit teardown: the gralloc module obtained in Init() exposes no
// release API here, and per-buffer state is freed via ReleaseBuffer().
NvImporter::~NvImporter() {
}
55
56int NvImporter::Init() {
57 int ret = hw_get_module(GRALLOC_HARDWARE_MODULE_ID,
58 (const hw_module_t **)&gralloc_);
59 if (ret) {
60 ALOGE("Failed to open gralloc module %d", ret);
61 return ret;
62 }
63
64 if (strcasecmp(gralloc_->common.author, "NVIDIA"))
65 ALOGW("Using non-NVIDIA gralloc module: %s/%s\n", gralloc_->common.name,
66 gralloc_->common.author);
67
68 return 0;
69}
70
// Imports a gralloc buffer handle into a DRM framebuffer description
// (hwc_drm_bo_t).
//
// Fast path: if gralloc already holds a cached NvBuffer_t for this handle,
// take an extra reference and return the cached bo. Slow path: import the
// buffer via the gralloc DRM_IMPORT perform() hook, create a DRM FB for it,
// and stash the NvBuffer_t back into gralloc so NvGrallocRelease() is called
// when gralloc drops its reference.
//
// Returns 0 on success; -ENOMEM or the failing call's error code otherwise.
// On failure *bo is left zeroed.
int NvImporter::ImportBuffer(buffer_handle_t handle, hwc_drm_bo_t *bo) {
  memset(bo, 0, sizeof(hwc_drm_bo_t));
  NvBuffer_t *buf = GrallocGetNvBuffer(handle);
  if (buf) {
    // Already imported: one more reference for this HWC caller.
    atomic_fetch_add(&buf->ref, 1);
    *bo = buf->bo;
    return 0;
  }

  buf = new NvBuffer_t();
  if (!buf) {
    ALOGE("Failed to allocate new NvBuffer_t");
    return -ENOMEM;
  }
  // bo.priv lets ReleaseBuffer() recover the owning NvBuffer_t from a bare
  // hwc_drm_bo_t later.
  buf->bo.priv = buf;
  buf->importer = this;

  // We initialize the reference count to 2 since NvGralloc is still using this
  // buffer (will be cleared in the NvGrallocRelease), and the other
  // reference is for HWC (this ImportBuffer call).
  atomic_init(&buf->ref, 2);

  int ret = gralloc_->perform(gralloc_, GRALLOC_MODULE_PERFORM_DRM_IMPORT,
                              drm_->fd(), handle, &buf->bo);
  if (ret) {
    ALOGE("GRALLOC_MODULE_PERFORM_DRM_IMPORT failed %d", ret);
    delete buf;
    return ret;
  }

  ret = drmModeAddFB2(drm_->fd(), buf->bo.width, buf->bo.height, buf->bo.format,
                      buf->bo.gem_handles, buf->bo.pitches, buf->bo.offsets,
                      &buf->bo.fb_id, 0);
  if (ret) {
    ALOGE("Failed to add fb %d", ret);
    // Undo the import (closes GEM handles) before freeing the wrapper.
    ReleaseBufferImpl(&buf->bo);
    delete buf;
    return ret;
  }

  ret = GrallocSetNvBuffer(handle, buf);
  if (ret) {
    /* This will happen if persist.tegra.gpu_mapping_cache is 0/off,
     * or if NV gralloc runs out of "priv slots" (currently 3 per buffer,
     * only one of which should be used by drm_hwcomposer). */
    ALOGE("Failed to register free callback for imported buffer %d", ret);
    ReleaseBufferImpl(&buf->bo);
    delete buf;
    return ret;
  }
  *bo = buf->bo;
  return 0;
}
124
Zach Reiznerc6520e42015-08-13 14:32:09 -0700125int NvImporter::ReleaseBuffer(hwc_drm_bo_t *bo) {
Sean Paul419b5e02015-06-10 14:30:47 -0400126 NvBuffer_t *buf = (NvBuffer_t *)bo->priv;
127 if (!buf) {
Sean Paul5325e102016-03-29 13:55:35 -0400128 ALOGE("Freeing bo %" PRIu32 ", buf is NULL!", bo->fb_id);
Sean Paul419b5e02015-06-10 14:30:47 -0400129 return 0;
130 }
131 if (atomic_fetch_sub(&buf->ref, 1) > 1)
132 return 0;
133
134 ReleaseBufferImpl(bo);
135 delete buf;
Sean Paulda6270d2015-06-01 14:11:52 -0400136 return 0;
137}
138
139// static
Sean Paul419b5e02015-06-10 14:30:47 -0400140void NvImporter::NvGrallocRelease(void *nv_buffer) {
141 NvBuffer_t *buf = (NvBuffer *)nv_buffer;
142 buf->importer->ReleaseBuffer(&buf->bo);
Sean Paulda6270d2015-06-01 14:11:52 -0400143}
144
145void NvImporter::ReleaseBufferImpl(hwc_drm_bo_t *bo) {
146 if (bo->fb_id) {
147 int ret = drmModeRmFB(drm_->fd(), bo->fb_id);
148 if (ret)
149 ALOGE("Failed to rm fb %d", ret);
150 }
151
152 struct drm_gem_close gem_close;
153 memset(&gem_close, 0, sizeof(gem_close));
154 int num_gem_handles = sizeof(bo->gem_handles) / sizeof(bo->gem_handles[0]);
155 for (int i = 0; i < num_gem_handles; i++) {
156 if (!bo->gem_handles[i])
157 continue;
158
159 gem_close.handle = bo->gem_handles[i];
160 int ret = drmIoctl(drm_->fd(), DRM_IOCTL_GEM_CLOSE, &gem_close);
Isaac Simha6e02c9d2015-10-14 11:40:29 -0700161 if (ret) {
Sean Paulda6270d2015-06-01 14:11:52 -0400162 ALOGE("Failed to close gem handle %d %d", i, ret);
Isaac Simha6e02c9d2015-10-14 11:40:29 -0700163 } else {
164 /* Clear any duplicate gem handle as well but don't close again */
Haixia Shi479412c2015-10-27 10:40:48 -0700165 for (int j = i + 1; j < num_gem_handles; j++)
166 if (bo->gem_handles[j] == bo->gem_handles[i])
Isaac Simha6e02c9d2015-10-14 11:40:29 -0700167 bo->gem_handles[j] = 0;
Sean Paulda6270d2015-06-01 14:11:52 -0400168 bo->gem_handles[i] = 0;
Isaac Simha6e02c9d2015-10-14 11:40:29 -0700169 }
Sean Paulda6270d2015-06-01 14:11:52 -0400170 }
171}
172
173NvImporter::NvBuffer_t *NvImporter::GrallocGetNvBuffer(buffer_handle_t handle) {
174 void *priv = NULL;
175 int ret =
176 gralloc_->perform(gralloc_, GRALLOC_MODULE_PERFORM_GET_IMPORTER_PRIVATE,
Sean Paul419b5e02015-06-10 14:30:47 -0400177 handle, NvGrallocRelease, &priv);
Sean Paulda6270d2015-06-01 14:11:52 -0400178 return ret ? NULL : (NvBuffer_t *)priv;
179}
180
181int NvImporter::GrallocSetNvBuffer(buffer_handle_t handle, NvBuffer_t *buf) {
182 return gralloc_->perform(gralloc_,
183 GRALLOC_MODULE_PERFORM_SET_IMPORTER_PRIVATE, handle,
Sean Paul419b5e02015-06-10 14:30:47 -0400184 NvGrallocRelease, buf);
Sean Paulda6270d2015-06-01 14:11:52 -0400185}
Sean Paul4c4646e2016-05-10 04:19:24 -0400186
187#ifdef USE_NVIDIA_IMPORTER
188// static
189std::unique_ptr<Planner> Planner::CreateInstance(DrmResources *) {
190 std::unique_ptr<Planner> planner(new Planner);
191 planner->AddStage<PlanStageProtectedRotated>();
192 planner->AddStage<PlanStageProtected>();
Adrian Salido45002322017-04-10 21:44:21 -0700193 planner->AddStage<PlanStagePrecomp>();
Sean Paul4c4646e2016-05-10 04:19:24 -0400194 planner->AddStage<PlanStageGreedy>();
195 return planner;
196}
197#endif
198
199static DrmPlane *GetCrtcPrimaryPlane(DrmCrtc *crtc,
200 std::vector<DrmPlane *> *planes) {
201 for (auto i = planes->begin(); i != planes->end(); ++i) {
202 if ((*i)->GetCrtcSupported(*crtc)) {
203 DrmPlane *plane = *i;
204 planes->erase(i);
205 return plane;
206 }
207 }
208 return NULL;
209}
210
211int PlanStageProtectedRotated::ProvisionPlanes(
212 std::vector<DrmCompositionPlane> *composition,
213 std::map<size_t, DrmHwcLayer *> &layers, DrmCrtc *crtc,
214 std::vector<DrmPlane *> *planes) {
215 int ret;
216 int protected_zorder = -1;
217 for (auto i = layers.begin(); i != layers.end();) {
218 if (!i->second->protected_usage() || !i->second->transform) {
219 ++i;
220 continue;
221 }
222
223 auto primary_iter = planes->begin();
224 for (; primary_iter != planes->end(); ++primary_iter) {
225 if ((*primary_iter)->type() == DRM_PLANE_TYPE_PRIMARY)
226 break;
227 }
228
229 // We cheat a little here. Since there can only be one primary plane per
230 // crtc, we know we'll only hit this case once. So we blindly insert the
231 // protected content at the beginning of the composition, knowing this path
232 // won't be taken a second time during the loop.
233 if (primary_iter != planes->end()) {
234 composition->emplace(composition->begin(),
235 DrmCompositionPlane::Type::kLayer, *primary_iter,
236 crtc, i->first);
237 planes->erase(primary_iter);
238 protected_zorder = i->first;
239 } else {
240 ALOGE("Could not provision primary plane for protected/rotated layer");
241 }
242 i = layers.erase(i);
243 }
244
245 if (protected_zorder == -1)
246 return 0;
247
248 // Add any layers below the protected content to the precomposition since we
249 // need to punch a hole through them.
250 for (auto i = layers.begin(); i != layers.end();) {
251 // Skip layers above the z-order of the protected content
252 if (i->first > static_cast<size_t>(protected_zorder)) {
253 ++i;
254 continue;
255 }
256
257 // If there's no precomp layer already queued, queue one now.
258 DrmCompositionPlane *precomp = GetPrecomp(composition);
259 if (precomp) {
260 precomp->source_layers().emplace_back(i->first);
261 } else {
262 if (planes->size()) {
263 DrmPlane *precomp_plane = planes->back();
264 planes->pop_back();
265 composition->emplace_back(DrmCompositionPlane::Type::kPrecomp,
266 precomp_plane, crtc, i->first);
267 } else {
268 ALOGE("Not enough planes to reserve for precomp fb");
269 }
270 }
271 i = layers.erase(i);
272 }
273 return 0;
274}
Sean Paulda6270d2015-06-01 14:11:52 -0400275}