Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions src/App.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import RootLayout from "./layout/Root";
import ManageUserTypes, { loader as loadUsers } from "./pages/Administrator/ManageUserTypes";
import Assignment from "./pages/Assignments/Assignment";
import AssignmentEditor from "./pages/Assignments/AssignmentEditor";
import CalibrationReview from "./pages/Assignments/CalibrationReview";
import { loadAssignment } from "./pages/Assignments/AssignmentUtil";
import ResponseMappings from "./pages/ResponseMappings/ResponseMappings";
import CreateTeams from "./pages/Assignments/CreateTeams";
Expand Down Expand Up @@ -124,6 +125,10 @@ function App() {
element: <ViewDelayedJobs />,
loader: loadAssignment,
},
{
path: "assignments/edit/:assignmentId/calibration/:mapId",
element: <CalibrationReview />,
},

{
path: "assignments/new",
Expand Down
311 changes: 251 additions & 60 deletions src/pages/Assignments/AssignmentEditor.tsx

Large diffs are not rendered by default.

159 changes: 159 additions & 0 deletions src/pages/Assignments/CalibrationReview.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
import { useEffect, useState } from "react";
import { Alert, Card, Container, Spinner, Tab, Tabs } from "react-bootstrap";
import { Link, useParams } from "react-router-dom";
import axiosClient from "../../utils/axios_client";
import {
normalizeCalibrationReport,
type CalibrationReportResponse,
} from "./calibrationReportNormalize";
import CalibrationStackedChart from "./components/CalibrationStackedChart";
import CalibrationRubricDetailPanel from "./components/CalibrationRubricDetailPanel";

const CalibrationReview = () => {
const { assignmentId, mapId } = useParams();
const [report, setReport] = useState<CalibrationReportResponse | null>(null);
const [loading, setLoading] = useState(true);
const [error, setError] = useState<string | null>(null);
const [selectedReviewerId, setSelectedReviewerId] = useState<number | null>(null);
const [activeTab, setActiveTab] = useState("comparison");

useEffect(() => {
const loadReport = async () => {
if (!assignmentId || !mapId) {
setError("Missing assignment or calibration map id.");
setLoading(false);
return;
}

try {
setLoading(true);
setError(null);
const response = await axiosClient.get<CalibrationReportResponse>(
`/assignments/${assignmentId}/reports/calibration/${mapId}`
);
setReport(response.data);
} catch (err: any) {
setError(err?.response?.data?.error || "Unable to load calibration report");
} finally {
setLoading(false);
}
};

loadReport();
}, [assignmentId, mapId]);
Comment on lines +20 to +43
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Prevent stale state from out-of-order fetch completions.

If route params change quickly, an older request can resolve last and overwrite the newer report/error state.

🔧 Suggested fix
 useEffect(() => {
+  let isCurrent = true;
   const loadReport = async () => {
     if (!assignmentId || !mapId) {
-      setError("Missing assignment or calibration map id.");
-      setLoading(false);
+      if (isCurrent) {
+        setError("Missing assignment or calibration map id.");
+        setLoading(false);
+      }
       return;
     }

     try {
-      setLoading(true);
-      setError(null);
+      if (isCurrent) {
+        setLoading(true);
+        setError(null);
+      }
       const response = await axiosClient.get<CalibrationReportResponse>(
         `/assignments/${assignmentId}/reports/calibration/${mapId}`
       );
-      setReport(response.data);
+      if (isCurrent) {
+        setReport(response.data);
+      }
     } catch (err: any) {
-      setError(err?.response?.data?.error || "Unable to load calibration report");
+      if (isCurrent) {
+        setError(err?.response?.data?.error || "Unable to load calibration report");
+      }
     } finally {
-      setLoading(false);
+      if (isCurrent) {
+        setLoading(false);
+      }
     }
   };

   loadReport();
+  return () => {
+    isCurrent = false;
+  };
 }, [assignmentId, mapId]);
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/pages/Assignments/CalibrationReview.tsx` around lines 20 - 43, The
useEffect's loadReport can have out-of-order responses that overwrite state;
prevent this by tracking and validating the active request before calling
setReport/setError/setLoading: create a per-effect signal (either an
AbortController passed into axiosClient.get or an incrementing requestId stored
in a ref) inside the useEffect, attach it to the axios request, and in the
response and catch handlers verify the signal matches (or that !signal.aborted)
before calling setReport, setError or setLoading; ensure you abort/mark the
signal stale in the cleanup return of the useEffect so previous requests cannot
update state (update code in the useEffect surrounding loadReport, and
references to loadReport, axiosClient.get, setReport, setError, setLoading).


const normalizedReport = report ? normalizeCalibrationReport(report) : null;
useEffect(() => {
setSelectedReviewerId(normalizedReport?.defaultReviewerId ?? null);
}, [normalizedReport?.defaultReviewerId]);

const studentResponseCount = normalizedReport?.latestStudentResponses.length ?? 0;
const hyperlinks = report?.submitted_content?.hyperlinks ?? [];
const files = report?.submitted_content?.files ?? [];
const hasSubmittedContent = hyperlinks.length > 0 || files.length > 0;
const backHref = assignmentId ? `/assignments/edit/${assignmentId}` : "/assignments";
const rubricDetailRows =
normalizedReport && selectedReviewerId
? normalizedReport.rubricDetailRowsByReviewer[selectedReviewerId] ?? normalizedReport.rubricDetailRows
: normalizedReport?.rubricDetailRows ?? [];
Comment on lines +56 to +58
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Use an explicit null check for selectedReviewerId.

The current truthy check skips reviewer-specific rows when id is 0. Use selectedReviewerId !== null for correctness.

🔧 Suggested fix
 const rubricDetailRows =
-  normalizedReport && selectedReviewerId
+  normalizedReport && selectedReviewerId !== null
     ? normalizedReport.rubricDetailRowsByReviewer[selectedReviewerId] ?? normalizedReport.rubricDetailRows
     : normalizedReport?.rubricDetailRows ?? [];
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
normalizedReport && selectedReviewerId
? normalizedReport.rubricDetailRowsByReviewer[selectedReviewerId] ?? normalizedReport.rubricDetailRows
: normalizedReport?.rubricDetailRows ?? [];
const rubricDetailRows =
normalizedReport && selectedReviewerId !== null
? normalizedReport.rubricDetailRowsByReviewer[selectedReviewerId] ?? normalizedReport.rubricDetailRows
: normalizedReport?.rubricDetailRows ?? [];
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/pages/Assignments/CalibrationReview.tsx` around lines 56 - 58, The
conditional that chooses reviewer-specific rows incorrectly treats a reviewer id
of 0 as falsy; update the check in the expression that uses normalizedReport and
selectedReviewerId so it uses an explicit null check (selectedReviewerId !==
null) instead of a truthy check, keeping the same fallback to
normalizedReport.rubricDetailRows when the reviewer-specific map
(normalizedReport.rubricDetailRowsByReviewer[selectedReviewerId]) is missing;
modify the expression referencing normalizedReport, selectedReviewerId,
rubricDetailRowsByReviewer, and rubricDetailRows accordingly.


return (
<Container fluid className="py-4 px-4">
<div className="d-flex justify-content-between align-items-start flex-wrap gap-3 mb-4">
<div>
<h1 className="mb-1">Calibration Report</h1>
<p className="text-muted mb-0">
Assignment {assignmentId} · Calibration map {mapId}
{report && (
<>
{" "}
· {studentResponseCount} student response{studentResponseCount === 1 ? "" : "s"}
</>
)}
</p>
</div>
<Link to={backHref} className="btn btn-outline-secondary btn-sm">
← Back to assignment
</Link>
</div>

{loading && (
<div className="d-flex align-items-center gap-2 mb-4">
<Spinner animation="border" size="sm" />
<span>Loading calibration report...</span>
</div>
)}

{!loading && error && (
<Alert variant="danger">
<Alert.Heading>Unable to load calibration report</Alert.Heading>
<div>{error}</div>
</Alert>
)}

{!loading && !error && report && normalizedReport && (
<>
<Tabs
activeKey={activeTab}
className="mb-4"
id="calibration-report-tabs"
onSelect={(key) => setActiveTab(key ?? "comparison")}
>
<Tab eventKey="comparison" title="Class comparison (stacked)">
<div className="pt-3">
<CalibrationStackedChart
bucketKeys={normalizedReport.bucketKeys}
chartData={normalizedReport.stackedChartData}
/>
</div>
</Tab>
<Tab eventKey="detail" title="Rubric detail">
<div className="pt-3">
<CalibrationRubricDetailPanel
reviewerOptions={normalizedReport.reviewerOptions}
selectedReviewerId={selectedReviewerId}
rows={rubricDetailRows}
onReviewerChange={setSelectedReviewerId}
/>

{hasSubmittedContent && (
<Card className="mb-4">
<Card.Body>
<Card.Title as="h5">Submitted content</Card.Title>
{hyperlinks.length > 0 && (
<>
<h6 className="mt-3 mb-2 text-muted">Hyperlinks</h6>
<ul className="mb-3">
{hyperlinks.map((link, idx) => (
<li key={`${link}-${idx}`}>
<a href={link} target="_blank" rel="noopener noreferrer">
{link}
</a>
Comment on lines +127 to +131
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Validate submitted hyperlink protocols before rendering anchor tags.

Rendering backend-provided links directly allows unsafe schemes (e.g., javascript:), which is a security risk on click.

🔒 Suggested fix
 {hyperlinks.map((link, idx) => (
-  <li key={`${link}-${idx}`}>
-    <a href={link} target="_blank" rel="noopener noreferrer">
-      {link}
-    </a>
-  </li>
+  <li key={`${link}-${idx}`}>
+    {(() => {
+      try {
+        const parsed = new URL(link);
+        const isSafe = parsed.protocol === "http:" || parsed.protocol === "https:";
+        return isSafe ? (
+          <a href={parsed.toString()} target="_blank" rel="noopener noreferrer">
+            {link}
+          </a>
+        ) : (
+          <span>{link}</span>
+        );
+      } catch {
+        return <span>{link}</span>;
+      }
+    })()}
+  </li>
 ))}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
{hyperlinks.map((link, idx) => (
<li key={`${link}-${idx}`}>
<a href={link} target="_blank" rel="noopener noreferrer">
{link}
</a>
{hyperlinks.map((link, idx) => (
<li key={`${link}-${idx}`}>
{(() => {
try {
const parsed = new URL(link);
const isSafe = parsed.protocol === "http:" || parsed.protocol === "https:";
return isSafe ? (
<a href={parsed.toString()} target="_blank" rel="noopener noreferrer">
{link}
</a>
) : (
<span>{link}</span>
);
} catch {
return <span>{link}</span>;
}
})()}
</li>
))}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/pages/Assignments/CalibrationReview.tsx` around lines 127 - 131, In
CalibrationReview.tsx, the hyperlinks.map render is using backend-provided links
directly; add a validation step (e.g., create a helper validateUrl or
isSafeProtocol) that uses the URL constructor or regex to allow only safe
protocols (http:, https:, mailto:) and rejects others; update the mapping in the
CalibrationReview component to either filter out unsafe links or render them
with a safe fallback (e.g., plain text or href="#" and no target) when
validateUrl(link) returns false, ensuring the anchor tag's href is never set to
an unsafe scheme.

</li>
))}
</ul>
</>
)}
{files.length > 0 && (
<>
<h6 className="mt-3 mb-2 text-muted">Files</h6>
<ul className="mb-0">
{files.map((file, idx) => (
<li key={`${file}-${idx}`}>{file}</li>
))}
</ul>
</>
)}
</Card.Body>
</Card>
)}
</div>
</Tab>
</Tabs>
</>
)}
</Container>
);
};

export default CalibrationReview;
92 changes: 92 additions & 0 deletions src/pages/Assignments/__tests__/CalibrationReview.test.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
import { render, screen, waitFor } from "@testing-library/react";
import { MemoryRouter, Route, Routes } from "react-router-dom";
import { vi } from "vitest";
import CalibrationReview from "../CalibrationReview";

// vi.hoisted so the mock function exists before the hoisted vi.mock factory runs.
const { mockGet } = vi.hoisted(() => ({ mockGet: vi.fn() }));
vi.mock("../../../utils/axios_client", () => ({
  default: { get: mockGet },
}));

// Minimal calibration report payload: one rubric item, one instructor
// response, and one submitted student response.
const mockReport = {
  map_id: 8,
  assignment_id: 1,
  reviewee_id: 5,
  rubric_items: [{ id: 11, txt: "Code quality", seq: 1, min_score: 0, max_score: 5 }],
  instructor_response: {
    id: 21,
    map_id: 8,
    reviewer_id: 15,
    reviewer_name: "instructor",
    is_submitted: true,
    updated_at: "2026-04-23T16:00:00Z",
    answers: [{ item_id: 11, score: 4, comments: "Good code" }],
  },
  student_responses: [
    {
      id: 31,
      map_id: 9,
      reviewer_id: 22,
      reviewer_name: "Student A",
      is_submitted: true,
      updated_at: "2026-04-23T16:00:00Z",
      answers: [{ item_id: 11, score: 3, comments: "Mostly good" }],
    },
  ],
  per_item_summary: [
    {
      item_id: 11,
      item_label: "Code quality",
      item_seq: 1,
      instructor_score: 4,
      instructor_comment: "Good code",
      bucket_counts: { "0": 0, "1": 0, "2": 0, "3": 1, "4": 0, "5": 0 },
      student_response_count: 1,
    },
  ],
  submitted_content: { hyperlinks: [], files: [] },
};

// Mounts the page at its real route shape so useParams resolves both ids.
const renderPage = () =>
  render(
    <MemoryRouter initialEntries={["/assignments/1/calibration/8"]}>
      <Routes>
        <Route
          path="/assignments/:assignmentId/calibration/:mapId"
          element={<CalibrationReview />}
        />
      </Routes>
    </MemoryRouter>
  );

describe("CalibrationReview page", () => {
  afterEach(() => vi.clearAllMocks());

  test("shows loading spinner then renders report heading and student count on success", async () => {
    mockGet.mockResolvedValueOnce({ data: mockReport } as any);

    renderPage();

    expect(screen.getByText(/loading calibration report/i)).toBeInTheDocument();

    // Wait specifically for the <h1> heading. A bare text query for
    // /calibration report/i also matches the "Loading calibration report..."
    // message, which would let the wait resolve before the report renders.
    await screen.findByRole("heading", { name: /calibration report/i });

    expect(screen.getByText(/1 student response/i)).toBeInTheDocument();
    expect(screen.getByRole("tab", { name: /class comparison/i })).toBeInTheDocument();
    expect(screen.getByRole("tab", { name: /rubric detail/i })).toBeInTheDocument();
  });

  test("shows error alert when the API call fails", async () => {
    mockGet.mockRejectedValueOnce({
      response: { data: { error: "Not authorized" } },
    });

    renderPage();

    await waitFor(() =>
      expect(screen.getByText(/unable to load calibration report/i)).toBeInTheDocument()
    );
    expect(screen.getByText("Not authorized")).toBeInTheDocument();
  });
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import { fireEvent, render, screen } from "@testing-library/react";
import { useState } from "react";
import CalibrationRubricDetailPanel from "../components/CalibrationRubricDetailPanel";
import type { ReviewerOption, RubricDetailRow } from "../calibrationReportNormalize";

describe("CalibrationRubricDetailPanel", () => {
  // Two selectable reviewers for the dropdown.
  const options: ReviewerOption[] = [
    { value: 21, label: "Reviewer Alpha", mapId: 9, responseId: 101 },
    { value: 22, label: "Reviewer Beta", mapId: 10, responseId: 102 },
  ];

  // Reviewer 21 has a fully populated row; reviewer 22 exercises the empty
  // comment / missing score rendering paths.
  const detailRowsByReviewer: Record<number, RubricDetailRow[]> = {
    21: [
      {
        itemId: 1,
        itemLabel: "Code quality",
        itemSeq: 1,
        instructorScore: 4,
        instructorComment: "Clear implementation",
        studentScore: 3,
        studentComment: "Mostly clear",
        agreeCount: 2,
        nearCount: 3,
        disagreeCount: 1,
        noScoreCount: 0,
        totalResponses: 6,
        averageScore: 3.2,
      },
    ],
    22: [
      {
        itemId: 1,
        itemLabel: "Code quality",
        itemSeq: 1,
        instructorScore: 4,
        instructorComment: "",
        studentScore: null,
        studentComment: "",
        agreeCount: 1,
        nearCount: 1,
        disagreeCount: 2,
        noScoreCount: 2,
        totalResponses: 6,
        averageScore: 2.5,
      },
    ],
  };

  // Stateful wrapper so the panel's onReviewerChange callback actually
  // updates the selected reviewer, mirroring how the page uses it.
  const Harness = () => {
    const [selectedReviewerId, setSelectedReviewerId] = useState<number | null>(21);
    const activeRows = selectedReviewerId ? detailRowsByReviewer[selectedReviewerId] : [];

    return (
      <CalibrationRubricDetailPanel
        reviewerOptions={options}
        selectedReviewerId={selectedReviewerId}
        rows={activeRows}
        onReviewerChange={setSelectedReviewerId}
      />
    );
  };

  test("renders the default reviewer comparison rows", () => {
    render(<Harness />);

    expect(screen.getByText(/rubric detail/i)).toBeInTheDocument();
    expect(screen.getByTestId("selected-reviewer-summary")).toHaveTextContent("Reviewer Alpha");
    expect(screen.getByText("1. Code quality")).toBeInTheDocument();
    expect(screen.getByText("Clear implementation")).toBeInTheDocument();
    expect(screen.getByText("Mostly clear")).toBeInTheDocument();
    expect(screen.getByText("1 below")).toBeInTheDocument();
    expect(screen.getByText("Green Agree: 2")).toBeInTheDocument();
    expect(screen.getByText("Yellow Near: 3")).toBeInTheDocument();
    expect(screen.getByText("Red Disagree: 1")).toBeInTheDocument();
  });

  test("switches reviewers and updates the comparison rows", () => {
    render(<Harness />);

    // Selecting Reviewer Beta should swap in the sparse row fixtures.
    fireEvent.change(screen.getByLabelText(/student reviewer/i), {
      target: { value: "22" },
    });

    expect(screen.getByTestId("selected-reviewer-summary")).toHaveTextContent("Reviewer Beta");
    expect(screen.getAllByText("No comments")).toHaveLength(2);
    expect(screen.getAllByText("N/A")).toHaveLength(2);
  });
});
Loading