Refine topic context and purchase followup guards

This commit is contained in:
2026-03-12 20:24:50 +04:00
parent 120c6cf96d
commit 401bbbdcca
8 changed files with 522 additions and 12 deletions

View File

@@ -54,6 +54,12 @@ describe('createOpenAiChatAssistant', () => {
expect(capturedBody!.model).toBe('gpt-5-mini') expect(capturedBody!.model).toBe('gpt-5-mini')
expect(capturedBody!.input[0]?.role).toBe('system') expect(capturedBody!.input[0]?.role).toBe('system')
expect(capturedBody!.input[0]?.content).toContain('Default to one to three short sentences.') expect(capturedBody!.input[0]?.content).toContain('Default to one to three short sentences.')
expect(capturedBody!.input[0]?.content).toContain(
'Do not ask the user to repeat information that is already present in the provided conversation history.'
)
expect(capturedBody!.input[0]?.content).toContain(
'Treat wishes, plans, tomorrow-talk, approximate future prices, and thinking aloud as plans, not completed purchases or payments.'
)
expect(capturedBody!.input[0]?.content).toContain( expect(capturedBody!.input[0]?.content).toContain(
'There is no general feature for creating or scheduling arbitrary personal reminders' 'There is no general feature for creating or scheduling arbitrary personal reminders'
) )

View File

@@ -93,6 +93,11 @@ const ASSISTANT_SYSTEM_PROMPT = [
'Default to one to three short sentences.', 'Default to one to three short sentences.',
'For simple greetings or small talk, reply in a single short sentence unless the user asks for more.', 'For simple greetings or small talk, reply in a single short sentence unless the user asks for more.',
'If the user is joking or testing you, you may answer playfully in one short sentence.', 'If the user is joking or testing you, you may answer playfully in one short sentence.',
'When the user refers to something said above, earlier, already mentioned, or in the dialog, answer from the provided conversation history if the answer is there.',
'For dialogue-memory questions, prioritize recent topic thread messages first, then same-day chat history, then per-user memory summary.',
'Do not ask the user to repeat information that is already present in the provided conversation history.',
'Treat wishes, plans, tomorrow-talk, approximate future prices, and thinking aloud as plans, not completed purchases or payments.',
'If the user is only discussing a possible future purchase, respond naturally instead of collecting missing purchase fields.',
'If the user tells you to stop, back off briefly and do not keep asking follow-up questions.', 'If the user tells you to stop, back off briefly and do not keep asking follow-up questions.',
'Do not repeat the same clarification after the user declines, backs off, or says they are only thinking.', 'Do not repeat the same clarification after the user declines, backs off, or says they are only thinking.',
'Do not restate the full household context unless the user explicitly asks for details.', 'Do not restate the full household context unless the user explicitly asks for details.',
@@ -140,13 +145,6 @@ export function createOpenAiChatAssistant(
topicCapabilityNotes(input.topicRole), topicCapabilityNotes(input.topicRole),
'Bounded household context:', 'Bounded household context:',
input.householdContext, input.householdContext,
input.memorySummary ? `Conversation summary:\n${input.memorySummary}` : null,
input.recentTurns.length > 0
? [
'Recent conversation turns:',
...input.recentTurns.map((turn) => `${turn.role}: ${turn.text}`)
].join('\n')
: null,
input.recentThreadMessages && input.recentThreadMessages.length > 0 input.recentThreadMessages && input.recentThreadMessages.length > 0
? [ ? [
'Recent topic thread messages:', 'Recent topic thread messages:',
@@ -164,7 +162,14 @@ export function createOpenAiChatAssistant(
: `${message.speaker} (${message.role}): ${message.text}` : `${message.speaker} (${message.role}): ${message.text}`
) )
].join('\n') ].join('\n')
: null : null,
input.recentTurns.length > 0
? [
'Recent conversation turns:',
...input.recentTurns.map((turn) => `${turn.role}: ${turn.text}`)
].join('\n')
: null,
input.memorySummary ? `Conversation summary:\n${input.memorySummary}` : null
] ]
.filter(Boolean) .filter(Boolean)
.join('\n\n') .join('\n\n')

View File

@@ -32,6 +32,175 @@ describe('createOpenAiPurchaseInterpreter', () => {
) )
}) })
// Builds the Responses API envelope the interpreter parses: one output item
// whose content holds the structured-result JSON as text.
function structuredResultPayload(structured: Record<string, unknown>): unknown {
  return {
    output: [
      {
        content: [
          {
            text: JSON.stringify(structured)
          }
        ]
      }
    ]
  }
}

// Swaps globalThis.fetch for a counting stub returning `payload`, runs the
// callback with a call-count accessor, and always restores the real fetch.
async function withCountingFetch(
  payload: unknown,
  run: (fetchCalls: () => number) => Promise<void>
): Promise<void> {
  const originalFetch = globalThis.fetch
  let calls = 0
  globalThis.fetch = (async () => {
    calls += 1
    return successfulResponse(payload)
  }) as unknown as typeof fetch
  try {
    await run(() => calls)
  } finally {
    globalThis.fetch = originalFetch
  }
}

// Canonical short-circuit result the guard produces without any LLM round-trip.
const GUARDED_NOT_PURCHASE: PurchaseInterpretation = {
  decision: 'not_purchase',
  amountMinor: null,
  currency: null,
  itemDescription: null,
  amountSource: null,
  calculationExplanation: null,
  confidence: 94,
  parserMode: 'llm',
  clarificationQuestion: null
}

test('returns not_purchase for planning chatter without calling the llm', async () => {
  const interpreter = createOpenAiPurchaseInterpreter('test-key', 'gpt-5-mini')
  expect(interpreter).toBeDefined()
  await withCountingFetch({}, async (fetchCalls) => {
    const result = await interpreter!('Хочу рыбу. Завтра подумаю, примерно 20 лари.', {
      defaultCurrency: 'GEL'
    })
    expect(result).toEqual<PurchaseInterpretation>(GUARDED_NOT_PURCHASE)
    // The planning guard must short-circuit before any network call is made.
    expect(fetchCalls()).toBe(0)
  })
})

test('returns not_purchase for meta references without calling the llm', async () => {
  const interpreter = createOpenAiPurchaseInterpreter('test-key', 'gpt-5-mini')
  expect(interpreter).toBeDefined()
  await withCountingFetch({}, async (fetchCalls) => {
    const result = await interpreter!('Я уже сказал выше', {
      defaultCurrency: 'GEL'
    })
    expect(result).toEqual<PurchaseInterpretation>(GUARDED_NOT_PURCHASE)
    // Bare meta references are handled locally; no LLM call expected.
    expect(fetchCalls()).toBe(0)
  })
})

test('does not short-circuit meta references that also include purchase details', async () => {
  const interpreter = createOpenAiPurchaseInterpreter('test-key', 'gpt-5-mini')
  expect(interpreter).toBeDefined()
  const payload = structuredResultPayload({
    decision: 'purchase',
    amountMinor: '3200',
    currency: 'GEL',
    itemDescription: 'молоко',
    confidence: 91,
    clarificationQuestion: null
  })
  await withCountingFetch(payload, async (fetchCalls) => {
    const result = await interpreter!('Я уже сказал выше, 32 лари за молоко', {
      defaultCurrency: 'GEL'
    })
    // A meta phrase plus concrete purchase facts must still reach the LLM.
    expect(fetchCalls()).toBe(1)
    expect(result).toEqual<PurchaseInterpretation>({
      decision: 'purchase',
      amountMinor: 3200n,
      currency: 'GEL',
      itemDescription: 'молоко',
      amountSource: 'explicit',
      calculationExplanation: null,
      confidence: 91,
      parserMode: 'llm',
      clarificationQuestion: null
    })
  })
})

test('does not short-circuit approximate clarification answers', async () => {
  const interpreter = createOpenAiPurchaseInterpreter('test-key', 'gpt-5-mini')
  expect(interpreter).toBeDefined()
  const payload = structuredResultPayload({
    decision: 'purchase',
    amountMinor: '2000',
    currency: 'GEL',
    itemDescription: 'молоко',
    confidence: 87,
    clarificationQuestion: null
  })
  await withCountingFetch(payload, async (fetchCalls) => {
    const result = await interpreter!('примерно 20 лари', {
      defaultCurrency: 'GEL',
      clarificationContext: {
        recentMessages: ['Купил молоко']
      }
    })
    // "примерно 20 лари" answers a clarification, so the LLM is consulted.
    expect(fetchCalls()).toBe(1)
    expect(result).toEqual<PurchaseInterpretation>({
      decision: 'purchase',
      amountMinor: 2000n,
      currency: 'GEL',
      itemDescription: 'молоко',
      amountSource: 'explicit',
      calculationExplanation: null,
      confidence: 87,
      parserMode: 'llm',
      clarificationQuestion: null
    })
  })
})
test('parses nested responses api content output', async () => { test('parses nested responses api content output', async () => {
const interpreter = createOpenAiPurchaseInterpreter('test-key', 'gpt-5-mini') const interpreter = createOpenAiPurchaseInterpreter('test-key', 'gpt-5-mini')
expect(interpreter).toBeDefined() expect(interpreter).toBeDefined()

View File

@@ -40,6 +40,14 @@ interface OpenAiStructuredResult {
clarificationQuestion: string | null clarificationQuestion: string | null
} }
// Pre-LLM heuristics used by shouldReturnNotPurchase to short-circuit obvious
// non-purchase messages before spending an OpenAI call. Each pattern has an
// English branch (plain \b word boundaries) and a Russian branch that emulates
// word boundaries with (?:^|[^\p{L}]) ... (?=$|[^\p{L}]) because \b does not
// work for Cyrillic letters.

// Future-intent / planning talk: "want to buy", "хочу купить", "завтра", etc.
const PLANNING_ONLY_PATTERN =
/\b(?:want to buy|thinking about|thinking of|plan to buy|planning to buy|going to buy|might buy|tomorrow|later)\b|(?:^|[^\p{L}])(?:(?:хочу|хотим|думаю|планирую|планируем|может)\s+(?:купить|взять|заказать)|(?:подумаю|завтра|потом))(?=$|[^\p{L}])/iu
// Completed-purchase verbs: "bought", "купил", "заплатил", etc. A planning
// match is ignored when one of these is also present in the message.
const COMPLETED_PURCHASE_PATTERN =
/\b(?:bought|purchased|ordered|picked up|grabbed|got|spent|paid)\b|(?:^|[^\p{L}])(?:купил(?:а|и)?|взял(?:а|и)?|заказал(?:а|и)?|потратил(?:а|и)?|заплатил(?:а|и)?|сторговался(?:\s+до)?)(?=$|[^\p{L}])/iu
// Meta references to earlier dialog: "already said above", "вопрос выше", etc.
const META_REFERENCE_PATTERN =
/\b(?:already said(?: above)?|said above|question above|have context|from the dialog(?:ue)?|based on the dialog(?:ue)?)\b|(?:^|[^\p{L}])(?:я\s+уже\s+сказал(?:\s+выше)?|уже\s+сказал(?:\s+выше)?|вопрос\s+выше|это\s+вопрос|контекст(?:\s+диалога)?|основываясь\s+на\s+диалоге)(?=$|[^\p{L}])/iu
// Global-flag copy of the same source so isBareMetaReference can strip every
// occurrence (a plain .test above deliberately stays non-global to avoid
// lastIndex state).
const META_REFERENCE_STRIP_PATTERN = new RegExp(META_REFERENCE_PATTERN.source, 'giu')
function asOptionalBigInt(value: string | null): bigint | null { function asOptionalBigInt(value: string | null): bigint | null {
if (value === null || !/^[0-9]+$/.test(value)) { if (value === null || !/^[0-9]+$/.test(value)) {
return null return null
@@ -117,6 +125,33 @@ export function buildPurchaseInterpretationInput(
].join('\n') ].join('\n')
} }
/**
 * True when the message consists solely of meta references to earlier dialog
 * (e.g. "я уже сказал выше") — i.e. nothing else survives once the meta
 * phrases, punctuation, and whitespace are removed.
 */
function isBareMetaReference(rawText: string): boolean {
  const trimmed = rawText.trim()
  const mentionsEarlierDialog = META_REFERENCE_PATTERN.test(trimmed)
  if (!mentionsEarlierDialog) {
    return false
  }
  // Remove every meta-reference occurrence, then collapse punctuation and
  // whitespace; any residue means the message carried additional content.
  const withoutMetaPhrases = trimmed.replace(META_REFERENCE_STRIP_PATTERN, ' ')
  const residue = withoutMetaPhrases.replace(/[\s,.:;!?()[\]{}"'`-]+/gu, ' ').trim()
  return residue === ''
}
/**
 * Cheap pre-filter: decide "not a purchase" without calling the LLM for empty
 * messages, bare meta references, and pure planning talk that lacks any
 * completed-purchase wording.
 */
function shouldReturnNotPurchase(rawText: string): boolean {
  const text = rawText.trim()
  if (text.length === 0 || isBareMetaReference(text)) {
    return true
  }
  // Both patterns are side-effect free (no `g` flag), so order is irrelevant.
  const soundsLikePlan = PLANNING_ONLY_PATTERN.test(text)
  const reportsCompletedBuy = COMPLETED_PURCHASE_PATTERN.test(text)
  return soundsLikePlan && !reportsCompletedBuy
}
export function createOpenAiPurchaseInterpreter( export function createOpenAiPurchaseInterpreter(
apiKey: string | undefined, apiKey: string | undefined,
model: string model: string
@@ -126,6 +161,20 @@ export function createOpenAiPurchaseInterpreter(
} }
return async (rawText, options) => { return async (rawText, options) => {
if (shouldReturnNotPurchase(rawText)) {
return {
decision: 'not_purchase',
amountMinor: null,
currency: null,
itemDescription: null,
amountSource: null,
calculationExplanation: null,
confidence: 94,
parserMode: 'llm',
clarificationQuestion: null
}
}
const response = await fetch('https://api.openai.com/v1/responses', { const response = await fetch('https://api.openai.com/v1/responses', {
method: 'POST', method: 'POST',
headers: { headers: {
@@ -147,6 +196,8 @@ export function createOpenAiPurchaseInterpreter(
'When amountSource is "calculated", also return a short calculationExplanation in the user message language, such as "5 × 6 lari = 30 lari".', 'When amountSource is "calculated", also return a short calculationExplanation in the user message language, such as "5 × 6 lari = 30 lari".',
'Ignore item quantities like rolls, kilograms, or layers unless they are clearly the money amount.', 'Ignore item quantities like rolls, kilograms, or layers unless they are clearly the money amount.',
'Treat colloquial completed-buy phrasing like "взял", "сходил и взял", or "сторговался до X" as a completed purchase when the message reports a real buy fact.', 'Treat colloquial completed-buy phrasing like "взял", "сходил и взял", or "сторговался до X" as a completed purchase when the message reports a real buy fact.',
'Plans, wishes, future intent, tomorrow-talk, and approximate future prices are not purchases. Return not_purchase for those.',
'Meta replies like "I already said above", "the question is above", or "do you have context" are not purchase details. Return not_purchase unless the latest message clearly supplies the missing purchase fact.',
'If recent messages from the same sender are provided, treat them as clarification context for the latest message.', 'If recent messages from the same sender are provided, treat them as clarification context for the latest message.',
'If the latest message is a complete standalone purchase on its own, ignore the earlier clarification context.', 'If the latest message is a complete standalone purchase on its own, ignore the earlier clarification context.',
'If the latest message answers a previous clarification, combine it with the earlier messages to resolve the purchase.', 'If the latest message answers a previous clarification, combine it with the earlier messages to resolve the purchase.',

View File

@@ -0,0 +1,17 @@
import { describe, expect, test } from 'bun:test'
import { shouldLoadExpandedChatHistory } from './topic-history'

describe('shouldLoadExpandedChatHistory', () => {
  test('recognizes broader russian dialogue-memory prompts', () => {
    // Each prompt refers back to the dialog and must trigger expanded history.
    const memoryPrompts = [
      'У тебя есть контекст диалога?',
      'Это вопрос, что я последнее купил, основываясь на диалоге?',
      'Вопрос выше уже есть'
    ]
    for (const prompt of memoryPrompts) {
      expect(shouldLoadExpandedChatHistory(prompt)).toBe(true)
    }
  })

  test('stays false for ordinary purchase chatter', () => {
    expect(shouldLoadExpandedChatHistory('Купил молоко за 6 лари')).toBe(false)
  })
})

View File

@@ -9,7 +9,7 @@ export interface TopicHistoryTurn {
} }
const MEMORY_LOOKUP_PATTERN = const MEMORY_LOOKUP_PATTERN =
/\b(?:do you remember|remember|what were we talking about|what did we say today)\b|(?:^|[^\p{L}])(?:помнишь|ты\s+помнишь|что\s+мы\s+сегодня\s+обсуждали|о\s+чем\s+мы\s+говорили)(?=$|[^\p{L}])/iu /\b(?:do you remember|remember|what were we talking about|what did we say today|what was the question above|do you have context|based on the dialog(?:ue)?|from the dialog(?:ue)?)\b|(?:^|[^\p{L}])(?:помнишь|ты\s+помнишь|что\s+мы\s+сегодня\s+обсуждали|о\s+чем\s+(?:мы\s+)?говорили|о\s+чем\s+была\s+речь|контекст\s+диалога|у\s+тебя\s+есть\s+контекст(?:\s+диалога)?|основываясь\s+на\s+диалоге|вопрос\s+выше|что\s+было\s+выше)(?=$|[^\p{L}])/iu
export function shouldLoadExpandedChatHistory(text: string): boolean { export function shouldLoadExpandedChatHistory(text: string): boolean {
return MEMORY_LOOKUP_PATTERN.test(text.trim()) return MEMORY_LOOKUP_PATTERN.test(text.trim())

View File

@@ -0,0 +1,174 @@
import { describe, expect, test } from 'bun:test'
import { createOpenAiTopicMessageRouter } from './topic-message-router'

/** Routing input accepted by the router the factory returns. */
type RouterInput = Parameters<NonNullable<ReturnType<typeof createOpenAiTopicMessageRouter>>>[0]

// Wraps a payload in a 200 JSON Response, mimicking the OpenAI endpoint.
function successfulResponse(payload: unknown): Response {
  return new Response(JSON.stringify(payload), {
    status: 200,
    headers: {
      'content-type': 'application/json'
    }
  })
}

/**
 * Creates a router, stubs globalThis.fetch so the LLM "returns" the given
 * routing decision, routes `input`, and always restores the original fetch.
 */
async function routeWithLlmDecision(llmDecision: Record<string, unknown>, input: RouterInput) {
  const router = createOpenAiTopicMessageRouter('test-key', 'gpt-5-mini', 20_000)
  expect(router).toBeDefined()
  const originalFetch = globalThis.fetch
  globalThis.fetch = (async () =>
    successfulResponse({ output_text: JSON.stringify(llmDecision) })) as unknown as typeof fetch
  try {
    return await router!(input)
  } finally {
    globalThis.fetch = originalFetch
  }
}

describe('createOpenAiTopicMessageRouter', () => {
  test('overrides purchase workflow routes for planning chatter', async () => {
    const route = await routeWithLlmDecision(
      {
        route: 'purchase_candidate',
        replyText: null,
        helperKind: 'purchase',
        shouldStartTyping: true,
        shouldClearWorkflow: false,
        confidence: 92,
        reason: 'llm_purchase_guess'
      },
      {
        locale: 'ru',
        topicRole: 'purchase',
        messageText: 'Я хочу рыбу. Завтра подумаю, примерно 20 лари.',
        isExplicitMention: true,
        isReplyToBot: false,
        activeWorkflow: null
      }
    )
    // The planning guard redirects the LLM's purchase guess to the assistant.
    expect(route).toMatchObject({
      route: 'topic_helper',
      helperKind: 'assistant',
      shouldStartTyping: true,
      shouldClearWorkflow: false,
      reason: 'planning_guard'
    })
  })

  test('overrides purchase followups for meta references to prior context', async () => {
    const route = await routeWithLlmDecision(
      {
        route: 'purchase_followup',
        replyText: null,
        helperKind: 'purchase',
        shouldStartTyping: false,
        shouldClearWorkflow: false,
        confidence: 89,
        reason: 'llm_followup_guess'
      },
      {
        locale: 'ru',
        topicRole: 'purchase',
        messageText: 'Я уже сказал выше',
        isExplicitMention: false,
        isReplyToBot: true,
        activeWorkflow: 'purchase_clarification'
      }
    )
    // A bare context reference clears the stuck clarification workflow.
    expect(route).toMatchObject({
      route: 'topic_helper',
      helperKind: 'assistant',
      shouldStartTyping: true,
      shouldClearWorkflow: true,
      reason: 'context_reference'
    })
  })

  test('keeps payment followups when a context reference also includes payment details', async () => {
    const route = await routeWithLlmDecision(
      {
        route: 'payment_followup',
        replyText: null,
        helperKind: 'payment',
        shouldStartTyping: false,
        shouldClearWorkflow: false,
        confidence: 90,
        reason: 'llm_payment_followup'
      },
      {
        locale: 'ru',
        topicRole: 'payments',
        messageText: 'Я уже сказал выше, оплатил 100 лари',
        isExplicitMention: false,
        isReplyToBot: true,
        activeWorkflow: 'payment_clarification'
      }
    )
    // Concrete payment facts alongside the meta phrase keep the LLM route.
    expect(route).toMatchObject({
      route: 'payment_followup',
      helperKind: 'payment',
      shouldStartTyping: false,
      shouldClearWorkflow: false,
      reason: 'llm_payment_followup'
    })
  })

  test('keeps purchase followups for approximate clarification answers', async () => {
    const route = await routeWithLlmDecision(
      {
        route: 'purchase_followup',
        replyText: null,
        helperKind: 'purchase',
        shouldStartTyping: true,
        shouldClearWorkflow: false,
        confidence: 86,
        reason: 'llm_purchase_followup'
      },
      {
        locale: 'ru',
        topicRole: 'purchase',
        messageText: 'примерно 20 лари',
        isExplicitMention: false,
        isReplyToBot: true,
        activeWorkflow: 'purchase_clarification'
      }
    )
    // "примерно 20 лари" answers the clarification and must not be guarded.
    expect(route).toMatchObject({
      route: 'purchase_followup',
      helperKind: 'purchase',
      shouldStartTyping: true,
      shouldClearWorkflow: false,
      reason: 'llm_purchase_followup'
    })
  })
})

View File

@@ -69,11 +69,14 @@ type ContextWithTopicMessageRouteCache = Context & {
const BACKOFF_PATTERN = const BACKOFF_PATTERN =
/\b(?:leave me alone|go away|stop|not now|back off|shut up)\b|(?:^|[^\p{L}])(?:отстань|хватит|не сейчас|замолчи|оставь(?:\s+меня)?\s+в\s+покое)(?=$|[^\p{L}])/iu /\b(?:leave me alone|go away|stop|not now|back off|shut up)\b|(?:^|[^\p{L}])(?:отстань|хватит|не сейчас|замолчи|оставь(?:\s+меня)?\s+в\s+покое)(?=$|[^\p{L}])/iu
const PLANNING_PATTERN = const PLANNING_PATTERN =
/\b(?:want to buy|thinking about buying|thinking of buying|going to buy|plan to buy|might buy)\b|(?:^|[^\p{L}])(?:хочу|думаю|планирую|может)\s+(?:купить|взять|заказать)(?=$|[^\p{L}])/iu /\b(?:want to buy|thinking about buying|thinking of buying|going to buy|plan to buy|might buy|tomorrow|later)\b|(?:^|[^\p{L}])(?:(?:хочу|думаю|планирую|может)\s+(?:купить|взять|заказать)|(?:подумаю|завтра|потом))(?=$|[^\p{L}])/iu
const LIKELY_PURCHASE_PATTERN = const LIKELY_PURCHASE_PATTERN =
/\b(?:bought|ordered|picked up|spent|paid)\b|(?:^|[^\p{L}])(?:купил(?:а|и)?|взял(?:а|и)?|заказал(?:а|и)?|потратил(?:а|и)?|заплатил(?:а|и)?|сторговался(?:\s+до)?)(?=$|[^\p{L}])/iu /\b(?:bought|ordered|picked up|spent|paid)\b|(?:^|[^\p{L}])(?:купил(?:а|и)?|взял(?:а|и)?|заказал(?:а|и)?|потратил(?:а|и)?|заплатил(?:а|и)?|сторговался(?:\s+до)?)(?=$|[^\p{L}])/iu
const LIKELY_PAYMENT_PATTERN = const LIKELY_PAYMENT_PATTERN =
/\b(?:paid rent|paid utilities|rent paid|utilities paid)\b|(?:^|[^\p{L}])(?:оплатил(?:а|и)?|заплатил(?:а|и)?)(?=$|[^\p{L}])/iu /\b(?:paid rent|paid utilities|rent paid|utilities paid)\b|(?:^|[^\p{L}])(?:оплатил(?:а|и)?|заплатил(?:а|и)?)(?=$|[^\p{L}])/iu
// Phrases (English + Russian) that point back at earlier conversation context
// instead of supplying new purchase/payment details. The Russian branch uses
// (?:^|[^\p{L}]) ... (?=$|[^\p{L}]) as a Unicode-aware word boundary because
// \b does not match around Cyrillic letters.
const CONTEXT_REFERENCE_PATTERN =
/\b(?:already said(?: above)?|said above|question above|do you have context|from the dialog(?:ue)?|based on the dialog(?:ue)?)\b|(?:^|[^\p{L}])(?:контекст(?:\s+диалога)?|у\s+тебя\s+есть\s+контекст(?:\s+диалога)?|основываясь\s+на\s+диалоге|я\s+уже\s+сказал(?:\s+выше)?|уже\s+сказал(?:\s+выше)?|вопрос\s+выше|вопрос\s+уже\s+есть|это\s+вопрос|ответь\s+на\s+него)(?=$|[^\p{L}])/iu
// Global-flag copy of the same source so isBareContextReference can strip
// every occurrence before checking what is left of the message.
const CONTEXT_REFERENCE_STRIP_PATTERN = new RegExp(CONTEXT_REFERENCE_PATTERN.source, 'giu')
const LETTER_PATTERN = /\p{L}/u const LETTER_PATTERN = /\p{L}/u
const DIRECT_BOT_ADDRESS_PATTERN = const DIRECT_BOT_ADDRESS_PATTERN =
/^\s*(?:(?:ну|эй|слышь|слушай|hey|yo)\s*,?\s*)*(?:бот|bot)(?=$|[^\p{L}])/iu /^\s*(?:(?:ну|эй|слышь|слушай|hey|yo)\s*,?\s*)*(?:бот|bot)(?=$|[^\p{L}])/iu
@@ -123,6 +126,83 @@ function fallbackReply(locale: 'en' | 'ru', kind: 'backoff' | 'watching'): strin
: "I'm here. If there's a real purchase or payment, I'll jump in." : "I'm here. If there's a real purchase or payment, I'll jump in."
} }
/**
 * True when the message is nothing but a reference to earlier conversation
 * context (e.g. "я уже сказал выше") — no extra content survives once the
 * reference phrases, punctuation, and whitespace are stripped away.
 */
function isBareContextReference(text: string): boolean {
  const trimmed = text.trim()
  const referencesContext = CONTEXT_REFERENCE_PATTERN.test(trimmed)
  if (!referencesContext) {
    return false
  }
  // Drop every reference phrase, then collapse punctuation and whitespace;
  // anything left over means the message carried additional details.
  const withoutReferences = trimmed.replace(CONTEXT_REFERENCE_STRIP_PATTERN, ' ')
  const residue = withoutReferences.replace(/[\s,.:;!?()[\]{}"'`-]+/gu, ' ').trim()
  return residue === ''
}
/**
 * True for pure planning/future-intent talk: the message matches the planning
 * pattern without any completed-purchase wording that would override it.
 */
function isPlanningMessage(text: string): boolean {
  const trimmed = text.trim()
  // Completed-buy verbs win over planning phrasing; both regexes are
  // side-effect free (no `g` flag), so evaluation order is safe to swap.
  if (LIKELY_PURCHASE_PATTERN.test(trimmed)) {
    return false
  }
  return PLANNING_PATTERN.test(trimmed)
}
/**
 * Builds the guard fallback route: hand the message to the generic assistant
 * when the bot was addressed (explicit mention, reply to the bot, or an active
 * workflow), otherwise stay silent. Confidence is fixed at 88 for guard routes.
 */
function assistantFallbackRoute(
  input: TopicMessageRoutingInput,
  reason: string,
  shouldClearWorkflow: boolean
): TopicMessageRoutingResult {
  const botWasAddressed =
    input.isExplicitMention || input.isReplyToBot || input.activeWorkflow !== null
  if (botWasAddressed) {
    return {
      route: 'topic_helper',
      replyText: null,
      helperKind: 'assistant',
      shouldStartTyping: true,
      shouldClearWorkflow,
      confidence: 88,
      reason
    }
  }
  return {
    route: 'silent',
    replyText: null,
    helperKind: null,
    shouldStartTyping: false,
    shouldClearWorkflow,
    confidence: 88,
    reason
  }
}
/**
 * Post-processes an LLM routing decision: bare context references and pure
 * planning chatter must not fall into purchase/payment workflows, so those
 * routes are rewritten to the assistant fallback. Everything else passes
 * through untouched.
 */
function applyRouteGuards(
  input: TopicMessageRoutingInput,
  route: TopicMessageRoutingResult
): TopicMessageRoutingResult {
  const text = input.messageText.trim()
  if (text === '') {
    return route
  }
  // An active workflow is abandoned whenever a guard rewrites the route.
  const clearWorkflow = input.activeWorkflow !== null
  const moneyRoutes = [
    'purchase_candidate',
    'purchase_followup',
    'payment_candidate',
    'payment_followup'
  ]
  if (moneyRoutes.includes(route.route) && isBareContextReference(text)) {
    return assistantFallbackRoute(input, 'context_reference', clearWorkflow)
  }
  const purchaseRoutes = ['purchase_candidate', 'purchase_followup']
  if (
    input.topicRole === 'purchase' &&
    purchaseRoutes.includes(route.route) &&
    isPlanningMessage(text)
  ) {
    return assistantFallbackRoute(input, 'planning_guard', clearWorkflow)
  }
  return route
}
export function fallbackTopicMessageRoute( export function fallbackTopicMessageRoute(
input: TopicMessageRoutingInput input: TopicMessageRoutingInput
): TopicMessageRoutingResult { ): TopicMessageRoutingResult {
@@ -153,7 +233,15 @@ export function fallbackTopicMessageRoute(
} }
} }
if (isBareContextReference(normalized)) {
return assistantFallbackRoute(input, 'context_reference', input.activeWorkflow !== null)
}
if (input.topicRole === 'purchase') { if (input.topicRole === 'purchase') {
if (input.activeWorkflow === 'purchase_clarification' && isPlanningMessage(normalized)) {
return assistantFallbackRoute(input, 'planning_guard', true)
}
if (input.activeWorkflow === 'purchase_clarification') { if (input.activeWorkflow === 'purchase_clarification') {
return { return {
route: 'purchase_followup', route: 'purchase_followup',
@@ -442,7 +530,7 @@ export function createOpenAiTopicMessageRouter(
? parsedObject.replyText.trim() ? parsedObject.replyText.trim()
: null : null
return { return applyRouteGuards(input, {
route, route,
replyText, replyText,
helperKind: helperKind:
@@ -455,7 +543,7 @@ export function createOpenAiTopicMessageRouter(
typeof parsedObject.confidence === 'number' ? parsedObject.confidence : null typeof parsedObject.confidence === 'number' ? parsedObject.confidence : null
), ),
reason: typeof parsedObject.reason === 'string' ? parsedObject.reason : null reason: typeof parsedObject.reason === 'string' ? parsedObject.reason : null
} })
} catch { } catch {
return fallbackTopicMessageRoute(input) return fallbackTopicMessageRoute(input)
} finally { } finally {