diff --git a/assets/javascripts/discourse/connectors/composer-fields/persona-llm-selector.gjs b/assets/javascripts/discourse/connectors/composer-fields/persona-llm-selector.gjs index a5240197..951d754a 100644 --- a/assets/javascripts/discourse/connectors/composer-fields/persona-llm-selector.gjs +++ b/assets/javascripts/discourse/connectors/composer-fields/persona-llm-selector.gjs @@ -152,7 +152,7 @@ export default class BotSelector extends Component { resetTargetRecipients() { if (this.allowLLMSelector) { const botUsername = this.currentUser.ai_enabled_chat_bots.find( - (bot) => bot.model_name === this.llm + (bot) => bot.id === this.llm ).username; this.composer.set("targetRecipients", botUsername); } else { @@ -170,7 +170,7 @@ return availableBots.map((bot) => { return { - id: bot.model_name, + id: bot.id, name: bot.display_name, }; }); diff --git a/lib/completions/endpoints/base.rb b/lib/completions/endpoints/base.rb index 439b54f5..58ff9c2a 100644 --- a/lib/completions/endpoints/base.rb +++ b/lib/completions/endpoints/base.rb @@ -7,7 +7,11 @@ module DiscourseAi attr_reader :partial_tool_calls CompletionFailed = Class.new(StandardError) - TIMEOUT = 60 + # 6 minutes + # Reasoning LLMs can take a very long time to respond; generally it will be under 5 minutes. + # The alternative is to have per-LLM timeouts, but that would make it extra confusing for people + # configuring. Let's try this simple solution first. + TIMEOUT = 360 class << self def endpoint_for(provider_name)