#include "include/private/dvr/composite_hmd.h"

#include <base/logging.h>
#include <private/dvr/numeric.h>

namespace android {
namespace dvr {

CompositeHmd::CompositeHmd(const HeadMountMetrics& head_mount_metrics,
                           const DisplayMetrics& display_metrics)
    : head_mount_metrics_(head_mount_metrics),
      display_metrics_(display_metrics) {
  MetricsChanged();
}

float CompositeHmd::GetTargetFrameDuration() const {
  return display_metrics_.GetFrameDurationSeconds();
}

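// ComputeDistortedPoint maps a point from normalized screen space into
// tan-angle space, applies the per-channel lens distortion, and maps the
// result into normalized texture space. ComputeInverseDistortedPoint below
// runs the same chain in reverse, using the inverse matrices and
// DistortInverse.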
vec2 CompositeHmd::ComputeDistortedPoint(EyeType eye, vec2 position,
                                         RgbColorChannel channel) const {
  position =
      TransformPoint(eye_tan_angle_from_norm_screen_matrix_[eye], position);
  vec2 distorted =
      head_mount_metrics_.GetColorChannelDistortion(channel).Distort(position);
  return TransformPoint(eye_norm_texture_from_tan_angle_matrix_[eye],
                        distorted);
}

vec2 CompositeHmd::ComputeInverseDistortedPoint(EyeType eye, vec2 position,
                                                RgbColorChannel channel) const {
  position = TransformPoint(eye_norm_texture_from_tan_angle_inv_matrix_[eye],
                            position);
  vec2 distorted =
      head_mount_metrics_.GetColorChannelDistortion(channel).DistortInverse(
          position);
  return TransformPoint(eye_tan_angle_from_norm_screen_inv_matrix_[eye],
                        distorted);
}

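// Computes a single distortion-mesh vertex. When uv_out is non-null, it is
// expected to point at an array of three texture coordinates indexed by
// RgbColorChannel (kRed, kGreen, kBlue), giving each color channel its own
// sample position for chromatic aberration correction.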
void CompositeHmd::ComputeDistortedVertex(EyeType eye, vec2 uv_in,
                                          vec2* vertex_out,
                                          vec2* uv_out) const {
  // The mesh vertices hold the shape of the distortion.
  vec2 vertex_position = ComputeInverseDistortedPoint(eye, uv_in, kRed);
  *vertex_out = vec2(vertex_position.x() - 0.5f, vertex_position.y() - 0.5f);

  if (uv_out) {
    // Compute the texture coordinate for each vertex coordinate.
    // Red's UV is the inverse of the inverse, so skip the calculation and
    // reuse uv_in directly.
    uv_out[kRed] = uv_in;
    uv_out[kGreen] = ComputeDistortedPoint(eye, vertex_position, kGreen);
    uv_out[kBlue] = ComputeDistortedPoint(eye, vertex_position, kBlue);
  }
}

vec2i CompositeHmd::GetRecommendedRenderTargetSize() const {
  return recommended_render_target_size_;
}

Range2i CompositeHmd::GetDisplayRange() const { return display_range_; }

mat4 CompositeHmd::GetEyeFromHeadMatrix(EyeType eye) const {
  return eye_from_head_matrix_[eye];
}

FieldOfView CompositeHmd::GetEyeFov(EyeType eye) const { return eye_fov_[eye]; }

Range2i CompositeHmd::GetEyeViewportBounds(EyeType eye) const {
  return eye_viewport_range_[eye];
}

void CompositeHmd::SetHeadMountMetrics(
    const HeadMountMetrics& head_mount_metrics) {
  // Use the assignment operator to do a memberwise copy.
  head_mount_metrics_ = head_mount_metrics;
  MetricsChanged();
}

const HeadMountMetrics& CompositeHmd::GetHeadMountMetrics() const {
  return head_mount_metrics_;
}

void CompositeHmd::SetDisplayMetrics(const DisplayMetrics& display_metrics) {
  // Use the assignment operator to do a memberwise copy.
  display_metrics_ = display_metrics;
  MetricsChanged();
}

const DisplayMetrics& CompositeHmd::GetDisplayMetrics() const {
  return display_metrics_;
}

void CompositeHmd::MetricsChanged() {
  // Abbreviations in variable names:
  // "vp": viewport
  // "ta": tan-angle
  const HeadMountMetrics& mount = head_mount_metrics_;
  DisplayMetrics display = display_metrics_;

  if (display.IsPortrait()) {
    // If we're in portrait mode, toggle the orientation so that all
    // calculations are done in landscape mode.
    display.ToggleOrientation();
  }

  float display_width_meters = display.GetSizeMeters()[0];
  float display_height_meters = display.GetSizeMeters()[1];

  vec2 pixels_per_meter = vec2(1.0f / display.GetMetersPerPixel()[0],
                               1.0f / display.GetMetersPerPixel()[1]);

  // virtual_eye_to_screen_dist is the distance from the screen to the eye
  // after it has been projected through the lens. This would normally be
  // slightly different from the distance to the actual eye.
  float virtual_eye_to_screen_dist = mount.GetVirtualEyeToScreenDistance();
  float meters_per_tan_angle = virtual_eye_to_screen_dist;
  vec2 pixels_per_tan_angle = pixels_per_meter * meters_per_tan_angle;
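  // A tan-angle coordinate t corresponds to a point t *
  // virtual_eye_to_screen_dist meters from the optical axis on the screen
  // plane. As an illustrative example (assumed values, not taken from the
  // metrics): with a 0.039 m virtual eye-to-screen distance and 10000 pixels
  // per meter, one tan-angle unit spans about 390 pixels.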

  CHECK_NE(0.0f, display_width_meters);
  CHECK_NE(0.0f, display_height_meters);
  CHECK_NE(0.0f, virtual_eye_to_screen_dist);

  // Height of the lenses from the bottom of the screen.
  float lens_y_center = 0;
  float bottom_dist = 0;
  float top_dist = 0;

  // bottom_display_dist and top_display_dist represent the distance from the
  // lens center to the edge of the display.
  float bottom_display_dist = 0;
  float top_display_dist = 0;
  switch (mount.GetVerticalAlignment()) {
    case HeadMountMetrics::kBottom:
      lens_y_center =
          mount.GetTrayToLensDistance() - display.GetBorderSizeMeters();
      bottom_dist = lens_y_center;
      top_dist = lens_y_center;
      bottom_display_dist = lens_y_center;
      top_display_dist = display_height_meters - lens_y_center;
      break;
    case HeadMountMetrics::kCenter:
      // TODO(hendrikw): This should respect the border size, but since we
      //                 currently hard code the border size, it would break
      //                 the distortion on some devices. Revisit when border
      //                 size is fixed.
      lens_y_center = display_height_meters * 0.5f;
      bottom_dist = lens_y_center;
      top_dist = lens_y_center;
      bottom_display_dist = lens_y_center;
      top_display_dist = lens_y_center;
      break;
    case HeadMountMetrics::kTop:
      lens_y_center = display_height_meters - (mount.GetTrayToLensDistance() -
                                               display.GetBorderSizeMeters());
      bottom_dist =
          mount.GetTrayToLensDistance() - display.GetBorderSizeMeters();
      top_dist = bottom_dist;
      bottom_display_dist = lens_y_center;
      top_display_dist = display_height_meters - lens_y_center;
      break;
  }

  float inner_dist = mount.GetScreenCenterToLensDistance();
  float outer_dist = display_width_meters * 0.5f - inner_dist;

  // We don't take chromatic aberration into account yet when computing the
  // FOV, viewport, etc., so we only use the green channel for now. Note that
  // the actual Distort function *does* implement chromatic aberration.
  const ColorChannelDistortion& distortion =
      mount.GetColorChannelDistortion(kGreen);

  vec2 outer_point(outer_dist / virtual_eye_to_screen_dist, 0.0f);
  vec2 inner_point(inner_dist / virtual_eye_to_screen_dist, 0.0f);
  vec2 bottom_point(0.0f, bottom_dist / virtual_eye_to_screen_dist);
  vec2 top_point(0.0f, top_dist / virtual_eye_to_screen_dist);

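  // Distort maps each screen-edge point from undistorted tan-angle space to
  // the tan-angle at which it is perceived through the lens; atanf then
  // converts that back to the angle subtended at the virtual eye, giving the
  // physical FOV limit along each edge.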
  float outer_angle = atanf(distortion.Distort(outer_point)[0]);
  float inner_angle = atanf(distortion.Distort(inner_point)[0]);
  float bottom_angle = atanf(distortion.Distort(bottom_point)[1]);
  float top_angle = atanf(distortion.Distort(top_point)[1]);

  for (EyeType eye : {kLeftEye, kRightEye}) {
    const FieldOfView max_fov = mount.GetEyeMaxFov(eye);
    float left_angle = (eye == kLeftEye) ? outer_angle : inner_angle;
    float right_angle = (eye == kLeftEye) ? inner_angle : outer_angle;

    eye_fov_[eye] = FieldOfView(std::min(left_angle, max_fov.GetLeft()),
                                std::min(right_angle, max_fov.GetRight()),
                                std::min(bottom_angle, max_fov.GetBottom()),
                                std::min(top_angle, max_fov.GetTop()));

    vec2 texture_vp_ta_p1 =
        vec2(-tanf(eye_fov_[eye].GetLeft()), -tanf(eye_fov_[eye].GetBottom()));
    vec2 texture_vp_ta_p2 =
        vec2(tanf(eye_fov_[eye].GetRight()), tanf(eye_fov_[eye].GetTop()));
    vec2 texture_vp_size_ta = texture_vp_ta_p2 - texture_vp_ta_p1;

    vec2 texture_vp_sizef_pixels =
        texture_vp_size_ta.array() * pixels_per_tan_angle.array();

    vec2i texture_vp_size_pixels =
        vec2i(static_cast<int32_t>(roundf(texture_vp_sizef_pixels[0])),
              static_cast<int32_t>(roundf(texture_vp_sizef_pixels[1])));
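    // The right eye's viewport is packed immediately to the right of the left
    // eye's, so both fit side by side in a single render target.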
    int vp_start_x =
        (eye == kLeftEye) ? 0 : eye_viewport_range_[kLeftEye].p2[0];

    eye_viewport_range_[eye] =
        Range2i::FromSize(vec2i(vp_start_x, 0), texture_vp_size_pixels);
    float left_dist = (eye == kLeftEye) ? outer_dist : inner_dist;
    float right_dist = (eye == kLeftEye) ? inner_dist : outer_dist;
    vec2 screen_ta_p1(-left_dist / virtual_eye_to_screen_dist,
                      -bottom_display_dist / virtual_eye_to_screen_dist);
    vec2 screen_ta_p2(right_dist / virtual_eye_to_screen_dist,
                      top_display_dist / virtual_eye_to_screen_dist);
    vec2 screen_ta_size = screen_ta_p2 - screen_ta_p1;

    // Align the tan-angle coordinates to the nearest pixel. This will ensure
    // that the optical center doesn't straddle multiple pixels.
    // TODO(hendrikw): verify that this works correctly for Daydream View.
    vec2 tan_angle_per_pixel(screen_ta_size.array() /
                             texture_vp_size_pixels.cast<float>().array());
    vec2 pixel_p1(screen_ta_p1.array() / tan_angle_per_pixel.array());
    vec2 pixel_shift(roundf(pixel_p1.x()) - pixel_p1.x(),
                     roundf(pixel_p1.y()) - pixel_p1.y());
    screen_ta_p1 +=
        (tan_angle_per_pixel.array() * pixel_shift.array()).matrix();
    screen_ta_p2 +=
        (tan_angle_per_pixel.array() * pixel_shift.array()).matrix();
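    // Worked example (assumed value): if pixel_p1.x() were -520.3 pixels, the
    // resulting shift of +0.3 pixels nudges both corners so the corner, and
    // with it the optical center at tan-angle zero, lands exactly on the
    // pixel grid.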

    // Calculate the transformations needed for the distortions: a matrix
    // taking normalized screen coordinates to tan-angle space, a matrix
    // taking tan-angle space to normalized texture coordinates, and their
    // inverses.
    eye_tan_angle_from_norm_screen_matrix_[eye] =
        TranslationMatrix(vec2(screen_ta_p1)) * ScaleMatrix(screen_ta_size);
    eye_tan_angle_from_norm_screen_inv_matrix_[eye] =
        eye_tan_angle_from_norm_screen_matrix_[eye].inverse();

    eye_norm_texture_from_tan_angle_inv_matrix_[eye] =
        TranslationMatrix(texture_vp_ta_p1) * ScaleMatrix(texture_vp_size_ta);
    eye_norm_texture_from_tan_angle_matrix_[eye] =
        eye_norm_texture_from_tan_angle_inv_matrix_[eye].inverse();
  }
  vec2i left_vp_size = eye_viewport_range_[kLeftEye].GetSize();
  vec2i right_vp_size = eye_viewport_range_[kRightEye].GetSize();

  recommended_render_target_size_ =
      vec2i(left_vp_size[0] + right_vp_size[0],
            std::max(left_vp_size[1], right_vp_size[1]));

  display_range_ = Range2i::FromSize(vec2i(0, 0), display.GetSizePixels());

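  // Each lens center sits GetScreenCenterToLensDistance() to either side of
  // the screen center, so eye_from_head translates head-space points by the
  // opposite of the eye's offset along x: the left eye sits at -x and gets a
  // +x translation, and vice versa.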
  eye_from_head_matrix_[kLeftEye] = Eigen::Translation3f(
      vec3(mount.GetScreenCenterToLensDistance(), 0.0f, 0.0f));
  eye_from_head_matrix_[kRightEye] = Eigen::Translation3f(
      vec3(-mount.GetScreenCenterToLensDistance(), 0.0f, 0.0f));
}

}  // namespace dvr
}  // namespace android