// Screen space rasterization
void rst::rasterizer::rasterize_triangle(const Triangle& t, const std::array<Eigen::Vector3f, 3>& view_pos)
{
    // TODO: From your HW3, get the triangle rasterization code.
    // TODO: Inside your rasterization loop:
    //    * v[i].w() is the vertex view space depth value z.
    //    * Z is interpolated view space depth for the current pixel
    //    * zp is depth between zNear and zFar, used for z-buffer
    // TODO: Interpolate the attributes:
    // auto interpolated_color
    // auto interpolated_normal
    // auto interpolated_texcoords
    // auto interpolated_shadingcoords
    // Use: fragment_shader_payload payload(interpolated_color, interpolated_normal.normalized(), interpolated_texcoords, texture ? &*texture : nullptr);
    // Use: payload.view_pos = interpolated_shadingcoords;
    // Use: Instead of passing the triangle's color directly to the frame buffer, pass the color to the shaders first to get the final color;
    // Use: auto pixel_color = fragment_shader(payload);
    auto v = t.toVector4();

    // Find out the bounding box of the current triangle.
    int min_x = std::floor(std::min({ v[0].x(), v[1].x(), v[2].x() }));
    int max_x = std::ceil(std::max({ v[0].x(), v[1].x(), v[2].x() }));
    int min_y = std::floor(std::min({ v[0].y(), v[1].y(), v[2].y() }));
    int max_y = std::ceil(std::max({ v[0].y(), v[1].y(), v[2].y() }));
    for (int x = min_x; x <= max_x; x++)
    {
        for (int y = min_y; y <= max_y; y++)
        {
            // Sample at the pixel center.
            if (insideTriangle(x + 0.5f, y + 0.5f, t.v))
            {
                // Use the same pixel-center sample point for the barycentric
                // coordinates as for the inside test above.
                auto [alpha, beta, gamma] = computeBarycentric2D(x + 0.5f, y + 0.5f, t.v);
                // Perspective-correct depth interpolation: v[i].w() holds the
                // vertex view space depth.
                float w_reciprocal = 1.0f / (alpha / v[0].w() + beta / v[1].w() + gamma / v[2].w());
                float z_interpolated = alpha * v[0].z() / v[0].w() + beta * v[1].z() / v[1].w() + gamma * v[2].z() / v[2].w();
                z_interpolated *= w_reciprocal;
                auto normal_interpolated = alpha * t.normal[0] + beta * t.normal[1] + gamma * t.normal[2];
                auto color_interpolated = alpha * t.color[0] + beta * t.color[1] + gamma * t.color[2];
                auto textureCoord_interpolated = alpha * t.tex_coords[0] + beta * t.tex_coords[1] + gamma * t.tex_coords[2];
                auto shadingCoord_interpolated = alpha * view_pos[0] + beta * view_pos[1] + gamma * view_pos[2];
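                // A sketch of the remaining steps named in the TODOs above,
                // assuming the course skeleton's depth_buf, get_index, set_pixel
                // and fragment_shader members (the depth convention, smaller z =
                // closer, may differ in your build):
                if (z_interpolated < depth_buf[get_index(x, y)])
                {
                    depth_buf[get_index(x, y)] = z_interpolated;
                    fragment_shader_payload payload(color_interpolated, normal_interpolated.normalized(),
                                                    textureCoord_interpolated, texture ? &*texture : nullptr);
                    payload.view_pos = shadingCoord_interpolated;
                    // Run the fragment shader instead of writing the raw color.
                    auto pixel_color = fragment_shader(payload);
                    set_pixel(Eigen::Vector2i(x, y), pixel_color);
                }
            }
        }
    }
}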
    Eigen::Vector3f result_color = {0, 0, 0};
    // The shading point is in view space, so the camera sits at the origin.
    Eigen::Vector3f v = (-point).normalized();
    for (auto& light : lights)
    {
        // TODO: For each light source in the code, calculate what the *ambient*, *diffuse*, and *specular*
        // components are. Then, accumulate that result on the *result_color* object.
        Eigen::Vector3f l = (light.position - point).normalized();  // light direction
        Eigen::Vector3f h = ((l + v) / 2.0f).normalized();          // half vector (the /2 is redundant after normalizing)
        float r2 = (light.position - point).squaredNorm();          // squared distance for falloff
        auto L_a = ka.cwiseProduct(amb_light_intensity);
        auto L_d = kd.cwiseProduct(light.intensity / r2 * std::max(0.0f, normal.dot(l)));
        auto L_s = ks.cwiseProduct(light.intensity / r2 * std::pow(std::max(0.0f, normal.dot(h)), p));
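        // Accumulate the three terms per light; after the loop, scale to
        // [0, 255] (sketch, matching the skeleton's return convention):
        result_color += L_a + L_d + L_s;
    }

    return result_color * 255.f;
}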
    Eigen::Vector3f return_color = {0, 0, 0};
    if (payload.texture)
    {
        // TODO: Get the texture value at the texture coordinates of the current fragment
        return_color = payload.texture->getColor(payload.tex_coords.x(), payload.tex_coords.y());
    }
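    // Assumption about the skeleton's Texture::getColor: it indexes the image
    // directly, so UVs outside [0, 1] can read out of bounds. A defensive
    // variant clamps first:
    //     float u = std::clamp(payload.tex_coords.x(), 0.0f, 1.0f);
    //     float v = std::clamp(payload.tex_coords.y(), 0.0f, 1.0f);
    //     return_color = payload.texture->getColor(u, v);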
// ...
    Eigen::Vector3f v = (-point).normalized();  // camera at the view-space origin
    for (auto& light : lights)
    {
        // TODO: For each light source in the code, calculate what the *ambient*, *diffuse*, and *specular*
        // components are. Then, accumulate that result on the *result_color* object.
        Eigen::Vector3f l = (light.position - point).normalized();
        Eigen::Vector3f h = ((l + v) / 2.0f).normalized();
        float r2 = (light.position - point).squaredNorm();
        auto L_a = ka.cwiseProduct(amb_light_intensity);
        auto L_d = kd.cwiseProduct(light.intensity / r2 * std::max(0.0f, normal.dot(l)));
        auto L_s = ks.cwiseProduct(light.intensity / r2 * std::pow(std::max(0.0f, normal.dot(h)), p));
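        // Same accumulation as in the Phong shader above (result_color is
        // declared in the elided part of this shader).
        result_color += L_a + L_d + L_s;
    }

    return result_color * 255.f;
}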
    // TODO: Implement bump mapping here
    // Let n = normal = (x, y, z)
    // Vector t = (x*y/sqrt(x*x+z*z), sqrt(x*x+z*z), z*y/sqrt(x*x+z*z))
    // Vector b = n cross product t
    // Matrix TBN = [t b n]
    // dU = kh * kn * (h(u+1/w, v) - h(u, v))
    // dV = kh * kn * (h(u, v+1/h) - h(u, v))
    // Vector ln = (-dU, -dV, 1)
    // Normal n = normalize(TBN * ln)
    float x = normal.x(), y = normal.y(), z = normal.z();
    float u = payload.tex_coords.x(), v = payload.tex_coords.y();
    int w = payload.texture->width, h = payload.texture->height;
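    // A sketch of the pseudocode above. Assumptions: h(u, v) is approximated by
    // the norm of the texel color (a common convention for this assignment, not
    // dictated by the skeleton), and kh, kn are the bump coefficients defined
    // earlier in the shader.
    Eigen::Vector3f t{ x * y / std::sqrt(x * x + z * z), std::sqrt(x * x + z * z), z * y / std::sqrt(x * x + z * z) };
    Eigen::Vector3f b = normal.cross(t);
    Eigen::Matrix3f TBN;
    TBN << t, b, normal;  // columns are t, b, n
    float dU = kh * kn * (payload.texture->getColor(u + 1.0f / w, v).norm() - payload.texture->getColor(u, v).norm());
    float dV = kh * kn * (payload.texture->getColor(u, v + 1.0f / h).norm() - payload.texture->getColor(u, v).norm());
    Eigen::Vector3f ln{ -dU, -dV, 1.0f };
    normal = (TBN * ln).normalized();
    // The skeleton then visualizes the perturbed normal:
    // result_color = normal; return result_color * 255.f;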
    // TODO: Implement displacement mapping here
    // Let n = normal = (x, y, z)
    // Vector t = (x*y/sqrt(x*x+z*z), sqrt(x*x+z*z), z*y/sqrt(x*x+z*z))
    // Vector b = n cross product t
    // Matrix TBN = [t b n]
    // dU = kh * kn * (h(u+1/w, v) - h(u, v))
    // dV = kh * kn * (h(u, v+1/h) - h(u, v))
    // Vector ln = (-dU, -dV, 1)
    // Position p = p + kn * n * h(u, v)
    // Normal n = normalize(TBN * ln)
    float x = normal.x(), y = normal.y(), z = normal.z();
    float u = payload.tex_coords.x(), v = payload.tex_coords.y();
    int w = payload.texture->width, h = payload.texture->height;
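    // Same TBN construction as in the bump shader above (sketch under the same
    // h(u, v) = getColor(u, v).norm() assumption); the two statements below
    // need TBN and ln defined.
    Eigen::Vector3f t{ x * y / std::sqrt(x * x + z * z), std::sqrt(x * x + z * z), z * y / std::sqrt(x * x + z * z) };
    Eigen::Vector3f b = normal.cross(t);
    Eigen::Matrix3f TBN;
    TBN << t, b, normal;
    float dU = kh * kn * (payload.texture->getColor(u + 1.0f / w, v).norm() - payload.texture->getColor(u, v).norm());
    float dV = kh * kn * (payload.texture->getColor(u, v + 1.0f / h).norm() - payload.texture->getColor(u, v).norm());
    Eigen::Vector3f ln{ -dU, -dV, 1.0f };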
    // Displace the shading point along the original normal, then perturb the normal.
    point += kn * normal * payload.texture->getColor(u, v).norm();
    normal = (TBN * ln).normalized();
    Eigen::Vector3f result_color = {0, 0, 0};
    Eigen::Vector3f v_ = (-point).normalized();  // view direction; camera at the view-space origin
    for (auto& light : lights)
    {
        // TODO: For each light source in the code, calculate what the *ambient*, *diffuse*, and *specular*
        // components are. Then, accumulate that result on the *result_color* object.
        Eigen::Vector3f l = (light.position - point).normalized();
        Eigen::Vector3f h = ((l + v_) / 2.0f).normalized();
        float r2 = (light.position - point).squaredNorm();
        auto L_a = ka.cwiseProduct(amb_light_intensity);
        auto L_d = kd.cwiseProduct(light.intensity / r2 * std::max(0.0f, normal.dot(l)));
        auto L_s = ks.cwiseProduct(light.intensity / r2 * std::pow(std::max(0.0f, normal.dot(h)), p));
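        // Accumulate per light and scale after the loop, as in the other shaders.
        result_color += L_a + L_d + L_s;
    }

    return result_color * 255.f;
}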