mirror of
https://github.com/bytedance/deer-flow.git
synced 2026-04-28 20:58:16 +00:00
refactor(feedback): inline feedback on history and drop positional mapping
The old ``useThreadFeedback`` hook loaded ``GET /api/threads/{id}/messages?limit=200``
and built two parallel lookup tables: ``runIdByAiIndex`` (an ordinal array of
run_ids for every ``ai_message``-typed event) and ``feedbackByRunId``. The render
loop in ``message-list.tsx`` walked the AI messages in order, incrementing
``aiMessageIndex`` on each non-human message, and used that ordinal to look up
the run_id and feedback.
This shape had three bugs, all reproducible on real threads:
1. **Fetch was capped at 200 messages.** Long or tool-heavy threads silently
dropped earlier entries from the map, so feedback buttons could be missing
on messages they should own.
2. **Ordinal mismatch.** The render loop counted every non-human message
(including each intermediate ``ai_tool_call``), but ``runIdByAiIndex`` only
pushed entries for ``event_type == "ai_message"``. A run with 3 tool_calls
+ 1 final AI message would push 1 entry while the render consumed 4
positions, so buttons mapped to the wrong positions across multi-run
threads.
3. **Two parallel data paths.** The ``/history`` render path and the
``/messages`` feedback-lookup path could drift between an
``invalidateQueries`` call and the next refetch, producing transient
mismaps.
The previous commit moved the authoritative message source for history to
the event store and added ``run_id`` + ``feedback`` inline on each message
dict returned by ``_get_event_store_messages``. This commit aligns the
frontend with that contract:
- **Delete** ``useThreadFeedback``, ``ThreadFeedbackData``,
``runIdByAiIndex``, ``feedbackByRunId``, and ``fetchAllThreadMessages``.
- **Introduce** ``useThreadMessageEnrichment`` that fetches
``POST /history?limit=1`` once, indexes the returned messages by
``message.id`` into a ``Map<id, {run_id, feedback?}>``, and invalidates
on stream completion (``onFinish`` in ``useThreadStream``). Keying by
``message.id`` is stable across runs, tool_call chains, and summarize.
- **Simplify** ``message-list.tsx`` to drop the ``aiMessageIndex``
counter and read ``enrichment?.get(msg.id)`` at each render step.
- **Rewire** ``message-list-item.tsx`` so the feedback button renders
when ``feedback !== undefined`` rather than when the message happens
to be non-human. ``feedback`` is ``undefined`` for non-eligible
messages (humans, non-final AI, tools), ``null`` for the final
ai_message of an unrated run, and a ``FeedbackData`` object once
rated — cleanly distinguishing "not eligible" from "eligible but
unrated".
``/api/threads/{id}/messages`` is kept as a debug/export surface; no
frontend code calls it anymore but the backend router is untouched.
Validation:
- ``pnpm check`` clean (0 errors, 1 pre-existing unrelated warning)
- Live test on thread ``3d5dea4a`` after gateway restart confirmed the
original user query is restored to position 0 and the feedback
button behaves correctly on the final AI message.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
229c8095be
commit
3580897c56
@ -46,6 +46,12 @@ export function MessageListItem({
|
||||
message: Message;
|
||||
isLoading?: boolean;
|
||||
threadId: string;
|
||||
// ``feedback`` is ``undefined`` for messages that are not feedback-eligible
|
||||
// (non-final AI messages, humans, tool results). It is ``null`` for the
|
||||
// final ai_message of a run that has no rating yet, and a FeedbackData
|
||||
// object once rated. The button renders whenever the field is present.
|
||||
feedback?: FeedbackData | null;
|
||||
runId?: string;
|
||||
}) {
|
||||
const isHuman = message.type === "human";
|
||||
return (
|
||||
@ -74,11 +80,11 @@ export function MessageListItem({
|
||||
""
|
||||
}
|
||||
/>
|
||||
{!isHuman && runId && threadId && (
|
||||
{feedback !== undefined && runId && threadId && (
|
||||
<FeedbackButtons
|
||||
threadId={threadId}
|
||||
runId={runId}
|
||||
initialFeedback={feedback ?? null}
|
||||
initialFeedback={feedback}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@ -4,7 +4,6 @@ import {
|
||||
Conversation,
|
||||
ConversationContent,
|
||||
} from "@/components/ai-elements/conversation";
|
||||
import type { FeedbackData } from "@/core/api/feedback";
|
||||
import { useI18n } from "@/core/i18n/hooks";
|
||||
import {
|
||||
extractContentFromMessage,
|
||||
@ -19,7 +18,7 @@ import { useRehypeSplitWordsIntoSpans } from "@/core/rehype";
|
||||
import type { Subtask } from "@/core/tasks";
|
||||
import { useUpdateSubtask } from "@/core/tasks/context";
|
||||
import type { AgentThreadState } from "@/core/threads";
|
||||
import { useThreadFeedback } from "@/core/threads/hooks";
|
||||
import { useThreadMessageEnrichment } from "@/core/threads/hooks";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
import { ArtifactFileList } from "../artifacts/artifact-file-list";
|
||||
@ -48,11 +47,9 @@ export function MessageList({
|
||||
const { t } = useI18n();
|
||||
const rehypePlugins = useRehypeSplitWordsIntoSpans(thread.isLoading);
|
||||
const updateSubtask = useUpdateSubtask();
|
||||
const { data: feedbackData } = useThreadFeedback(threadId);
|
||||
const messages = thread.messages;
|
||||
const { data: enrichment } = useThreadMessageEnrichment(threadId);
|
||||
|
||||
// Track AI message ordinal index for feedback mapping
|
||||
let aiMessageIndex = 0;
|
||||
if (thread.isThreadLoading && messages.length === 0) {
|
||||
return <MessageListSkeleton />;
|
||||
}
|
||||
@ -64,24 +61,21 @@ export function MessageList({
|
||||
{groupMessages(messages, (group) => {
|
||||
if (group.type === "human" || group.type === "assistant") {
|
||||
return group.messages.map((msg) => {
|
||||
let runId: string | undefined;
|
||||
let feedback: FeedbackData | null = null;
|
||||
if (msg.type !== "human" && feedbackData) {
|
||||
runId =
|
||||
feedbackData.runIdByAiIndex[aiMessageIndex] ?? undefined;
|
||||
feedback = runId
|
||||
? (feedbackData.feedbackByRunId[runId] ?? null)
|
||||
: null;
|
||||
aiMessageIndex++;
|
||||
}
|
||||
// Run id and feedback are sourced from the ``/history``
|
||||
// enrichment query (see ``useThreadMessageEnrichment``). The
|
||||
// map is keyed by ``message.id`` so tool_call interleavings
|
||||
// and multi-run threads map cleanly without positional math.
|
||||
// ``feedback`` is ``undefined`` for non-eligible messages,
|
||||
// ``null`` for eligible-but-unrated, and an object once rated.
|
||||
const entry = msg.id ? enrichment?.get(msg.id) : undefined;
|
||||
return (
|
||||
<MessageListItem
|
||||
key={`${group.id}/${msg.id}`}
|
||||
threadId={threadId}
|
||||
message={msg}
|
||||
isLoading={thread.isLoading}
|
||||
runId={runId}
|
||||
feedback={feedback}
|
||||
runId={entry?.run_id}
|
||||
feedback={entry?.feedback}
|
||||
/>
|
||||
);
|
||||
});
|
||||
|
||||
@ -8,6 +8,7 @@ import { toast } from "sonner";
|
||||
import type { PromptInputMessage } from "@/components/ai-elements/prompt-input";
|
||||
|
||||
import { getAPIClient } from "../api";
|
||||
import type { FeedbackData } from "../api/feedback";
|
||||
import { fetchWithAuth } from "../api/fetcher";
|
||||
import { getBackendBaseURL } from "../config";
|
||||
import { useI18n } from "../i18n/hooks";
|
||||
@ -294,7 +295,9 @@ export function useThreadStream({
|
||||
onFinish(state) {
|
||||
listeners.current.onFinish?.(state.values);
|
||||
void queryClient.invalidateQueries({ queryKey: ["threads", "search"] });
|
||||
void queryClient.invalidateQueries({ queryKey: ["thread-feedback"] });
|
||||
void queryClient.invalidateQueries({
|
||||
queryKey: ["thread-message-enrichment"],
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
@ -680,52 +683,62 @@ export function useRenameThread() {
|
||||
});
|
||||
}
|
||||
|
||||
export interface ThreadFeedbackData {
|
||||
/** Maps AI message ordinal index (0-based, counting only AI messages) to run_id */
|
||||
runIdByAiIndex: string[];
|
||||
/** Maps run_id to feedback data */
|
||||
feedbackByRunId: Record<
|
||||
string,
|
||||
{ feedback_id: string; rating: number; comment: string | null }
|
||||
>;
|
||||
/** Per-message enrichment data attached by the backend ``/history`` helper. */
|
||||
export interface MessageEnrichment {
|
||||
run_id: string;
|
||||
/** ``undefined`` = not feedback-eligible; ``null`` = eligible but unrated. */
|
||||
feedback?: FeedbackData | null;
|
||||
}
|
||||
|
||||
export function useThreadFeedback(threadId: string | null | undefined) {
|
||||
/**
|
||||
* Fetch ``/history`` once and index feedback + run_id by message id.
|
||||
*
|
||||
* Replaces the old ``useThreadFeedback`` hook which keyed by AI-message
|
||||
* ordinal position — an inherently fragile mapping that broke whenever
|
||||
* ``ai_tool_call`` messages were interleaved with ``ai_message`` messages.
|
||||
* Keying by ``message.id`` is stable regardless of run count, tool-call
|
||||
* chains, or summarization.
|
||||
*
|
||||
* The ``/history`` response is refreshed on every stream completion via
|
||||
* ``invalidateQueries(["thread-message-enrichment"])`` in ``onFinish``.
|
||||
*/
|
||||
export function useThreadMessageEnrichment(
|
||||
threadId: string | null | undefined,
|
||||
) {
|
||||
return useQuery({
|
||||
queryKey: ["thread-feedback", threadId],
|
||||
queryFn: async (): Promise<ThreadFeedbackData> => {
|
||||
const empty: ThreadFeedbackData = {
|
||||
runIdByAiIndex: [],
|
||||
feedbackByRunId: {},
|
||||
};
|
||||
queryKey: ["thread-message-enrichment", threadId],
|
||||
queryFn: async (): Promise<Map<string, MessageEnrichment>> => {
|
||||
const empty = new Map<string, MessageEnrichment>();
|
||||
if (!threadId) return empty;
|
||||
const res = await fetchWithAuth(
|
||||
`${getBackendBaseURL()}/api/threads/${encodeURIComponent(threadId)}/messages?limit=200`,
|
||||
`${getBackendBaseURL()}/api/threads/${encodeURIComponent(threadId)}/history`,
|
||||
{
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({ limit: 1 }),
|
||||
},
|
||||
);
|
||||
if (!res.ok) return empty;
|
||||
const messages: Array<{
|
||||
run_id: string;
|
||||
event_type: string;
|
||||
feedback?: {
|
||||
feedback_id: string;
|
||||
rating: number;
|
||||
comment: string | null;
|
||||
} | null;
|
||||
}> = await res.json();
|
||||
const runIdByAiIndex: string[] = [];
|
||||
const feedbackByRunId: Record<
|
||||
string,
|
||||
{ feedback_id: string; rating: number; comment: string | null }
|
||||
> = {};
|
||||
for (const msg of messages) {
|
||||
if (msg.event_type === "ai_message") {
|
||||
runIdByAiIndex.push(msg.run_id);
|
||||
}
|
||||
if (msg.feedback && msg.run_id) {
|
||||
feedbackByRunId[msg.run_id] = msg.feedback;
|
||||
}
|
||||
const entries = (await res.json()) as Array<{
|
||||
values?: {
|
||||
messages?: Array<{
|
||||
id?: string;
|
||||
run_id?: string;
|
||||
feedback?: FeedbackData | null;
|
||||
}>;
|
||||
};
|
||||
}>;
|
||||
const messages = entries[0]?.values?.messages ?? [];
|
||||
const map = new Map<string, MessageEnrichment>();
|
||||
for (const m of messages) {
|
||||
if (!m.id || !m.run_id) continue;
|
||||
const entry: MessageEnrichment = { run_id: m.run_id };
|
||||
// Preserve presence: "feedback" key absent → ineligible; present with
|
||||
// null → eligible but unrated; present with object → rated.
|
||||
if ("feedback" in m) entry.feedback = m.feedback;
|
||||
map.set(m.id, entry);
|
||||
}
|
||||
return { runIdByAiIndex, feedbackByRunId };
|
||||
return map;
|
||||
},
|
||||
enabled: !!threadId,
|
||||
staleTime: 30_000,
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user