DEV: Disable the plugin by default (#1511)

…and preserve the current setting on existing sites
Jarek Radosz, 2025-07-22 12:05:52 +02:00 (committed by GitHub)
parent cc77e73cfd
commit f231aad8b5
238 changed files with 570 additions and 128 deletions
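The change lands in three coordinated pieces: the discourse_ai_enabled site setting default flips from true to false in settings.yml; a new EnableAiIfAlreadyInstalled migration writes an explicit enabled value for sites that installed the plugin before this change, so nothing is switched off under an existing install; and the spec suite swaps direct SiteSetting.discourse_ai_enabled = true assignments for a shared enable_current_plugin helper.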

View File

@ -1,6 +1,6 @@
 discourse_ai:
   discourse_ai_enabled:
-    default: true
+    default: false
     client: true
   ai_artifact_security:
     client: true
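For context, this default is the plugin's kill switch: a Discourse plugin names the setting that gates it with enabled_site_setting in plugin.rb. That file is not part of this diff; a sketch of the assumed declaration:

# plugin.rb -- not shown in this diff; illustrative sketch of the
# standard enabled_site_setting declaration this default feeds into
enabled_site_setting :discourse_ai_enabled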

View File

@ -0,0 +1,23 @@
# frozen_string_literal: true
class EnableAiIfAlreadyInstalled < ActiveRecord::Migration[7.2]
def up
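# The created_at of this early plugin migration in schema_migration_details
# approximates when the plugin was first installed on the site.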
installed_at = DB.query_single(<<~SQL)&.first
SELECT created_at FROM schema_migration_details WHERE version='20230224165056'
SQL
if installed_at && installed_at < 1.hour.ago
# The plugin was installed before we changed it to be disabled-by-default
# Therefore, if there is no existing database value, enable the plugin
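# data_type 5 is the boolean site-setting type; ON CONFLICT DO NOTHING
# keeps any value an admin has already saved for this setting.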
execute <<~SQL
INSERT INTO site_settings(name, data_type, value, created_at, updated_at)
VALUES('discourse_ai_enabled', 5, 't', NOW(), NOW())
ON CONFLICT (name) DO NOTHING
SQL
end
end
def down
raise ActiveRecord::IrreversibleMigration
end
end
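The one-hour guard is what keeps fresh installs disabled: on a brand-new site all of the plugin's migrations, including this one, run in a single batch, so the early migration's created_at is only seconds old, the condition fails, and no row is inserted.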

View File

@ -10,6 +10,8 @@ RSpec.describe DiscourseAi::Configuration::Feature do
DiscourseAi::Completions::Llm.with_prepared_responses(["OK"]) { block.call }
end
before { enable_current_plugin }
describe "#llm_model" do
context "when persona is not found" do
it "returns nil when persona_id is invalid" do

View File

@ -8,6 +8,8 @@ RSpec.describe DiscourseAi::Configuration::LlmEnumerator do
Fabricate(:automation, script: "llm_report", name: "some automation", enabled: true)
end
before { enable_current_plugin }
describe "#values_for_serialization" do
it "returns an array for that can be used for serialization" do
fake_model.destroy!

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
RSpec.describe DiscourseAi::Configuration::LlmValidator do
before { enable_current_plugin }
describe "#valid_value?" do
context "when the parent module is enabled and we try to reset the selected model" do
before do

View File

@ -3,6 +3,8 @@
RSpec.describe DiscourseAi::Configuration::SpamDetectionValidator do
let(:validator) { described_class.new }
before { enable_current_plugin }
it "always returns true if setting the value to false" do
expect(validator.valid_value?("f")).to eq(true)
end

View File

@ -8,11 +8,14 @@ require Rails.root.join(
RSpec.describe MigrateSentimentClassificationResultFormat do
let(:connection) { ActiveRecord::Base.connection }
-before { connection.execute(<<~SQL) }
+before do
+  enable_current_plugin
+  connection.execute(<<~SQL)
INSERT INTO classification_results (model_used, classification, created_at, updated_at) VALUES
('sentiment', '{"neutral": 65, "negative": 20, "positive": 14}', NOW(), NOW()),
('emotion', '{"sadness": 10, "surprise": 15, "fear": 5, "anger": 20, "joy": 30, "disgust": 8, "neutral": 10}', NOW(), NOW());
SQL
+end
after { connection.execute("DELETE FROM classification_results") }

View File

@ -21,6 +21,8 @@ RSpec.describe FixBrokenOpenAiEmbeddingsConfig do
).first
end
before { enable_current_plugin }
describe "#up" do
context "when embeddings are already configured" do
fab!(:embedding_definition)

View File

@ -8,6 +8,8 @@ require Rails.root.join(
RSpec.describe CleanUnusedEmbeddingSearchIndexes do
let(:connection) { ActiveRecord::Base.connection }
before { enable_current_plugin }
describe "#up" do
before do
# Copied from 20241008054440_create_binary_indexes_for_embeddings

View File

@ -7,7 +7,7 @@ describe Jobs::DetectTranslatePost do
let(:locales) { %w[en ja] }
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end

View File

@ -7,7 +7,7 @@ describe Jobs::DetectTranslateTopic do
let(:locales) { %w[en ja] }
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end
@ -32,7 +32,6 @@ describe Jobs::DetectTranslateTopic do
end
it "detects locale" do
-SiteSetting.discourse_ai_enabled = true
allow(DiscourseAi::Translation::TopicLocaleDetector).to receive(:detect_locale).with(
topic,
).and_return("zh_CN")

View File

@ -20,6 +20,8 @@ RSpec.describe Jobs::DigestRagUpload do
end
before do
enable_current_plugin
SiteSetting.ai_embeddings_selected_model = cloudflare_embedding_def.id
SiteSetting.ai_embeddings_enabled = true
SiteSetting.authorized_extensions = "txt"

View File

@ -7,6 +7,7 @@ RSpec.describe Jobs::FastTrackTopicGist do
fab!(:post_2) { Fabricate(:post, topic: topic_1, post_number: 2) }
before do
enable_current_plugin
assign_fake_provider_to(:ai_summarization_model)
SiteSetting.ai_summarization_enabled = true
SiteSetting.ai_summary_gists_enabled = true

View File

@ -5,7 +5,10 @@ RSpec.describe Jobs::GenerateInferredConcepts do
fab!(:post)
fab!(:concept) { Fabricate(:inferred_concept, name: "programming") }
-before { SiteSetting.inferred_concepts_enabled = true }
+before do
+  enable_current_plugin
+  SiteSetting.inferred_concepts_enabled = true
+end
describe "#execute" do
it "does nothing with blank item_ids" do

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
RSpec.describe Jobs::GenerateRagEmbeddings do
before { enable_current_plugin }
describe "#execute" do
fab!(:vector_def) { Fabricate(:embedding_definition) }

View File

@ -10,7 +10,7 @@ describe Jobs::LocalizeCategories do
end
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end

View File

@ -7,7 +7,7 @@ describe Jobs::LocalizePosts do
let(:locales) { %w[en ja de] }
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end

View File

@ -7,7 +7,7 @@ describe Jobs::LocalizeTopics do
let(:locales) { %w[en ja de] }
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end

View File

@ -3,6 +3,8 @@
RSpec.describe Jobs::ManageEmbeddingDefSearchIndex do
fab!(:embedding_definition)
before { enable_current_plugin }
describe "#execute" do
context "when there is no embedding def" do
it "does nothing" do

View File

@ -3,7 +3,10 @@
RSpec.describe Jobs::StreamComposerHelper do
subject(:job) { described_class.new }
-before { assign_fake_provider_to(:ai_helper_model) }
+before do
+  enable_current_plugin
+  assign_fake_provider_to(:ai_helper_model)
+end
describe "#execute" do
let!(:input) { "I liek to eet pie fur brakefast becuz it is delishus." }

View File

@ -17,6 +17,7 @@ RSpec.describe Jobs::StreamDiscordReply, type: :job do
fab!(:persona) { Fabricate(:ai_persona, default_llm_id: llm_model.id) }
before do
enable_current_plugin
SiteSetting.ai_discord_search_enabled = true
SiteSetting.ai_discord_search_mode = "persona"
SiteSetting.ai_discord_search_persona = persona.id

View File

@ -3,6 +3,8 @@
RSpec.describe Jobs::StreamDiscoverReply do
subject(:job) { described_class.new }
before { enable_current_plugin }
describe "#execute" do
fab!(:user)
fab!(:llm_model)

View File

@ -3,7 +3,10 @@
RSpec.describe Jobs::StreamPostHelper do
subject(:job) { described_class.new }
-before { assign_fake_provider_to(:ai_helper_model) }
+before do
+  enable_current_plugin
+  assign_fake_provider_to(:ai_helper_model)
+end
describe "#execute" do
fab!(:topic)

View File

@ -3,6 +3,8 @@
RSpec.describe Jobs::StreamTopicAiSummary do
subject(:job) { described_class.new }
before { enable_current_plugin }
describe "#execute" do
fab!(:topic) { Fabricate(:topic, highest_post_number: 2) }
fab!(:post_1) { Fabricate(:post, topic: topic, post_number: 1) }

View File

@ -5,7 +5,7 @@ describe Jobs::CategoriesLocaleDetectionBackfill do
subject(:job) { described_class.new }
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end

View File

@ -24,6 +24,8 @@ RSpec.describe Jobs::EmbeddingsBackfill do
fab!(:embedding_array) { Array.new(1024) { 1 } }
before do
enable_current_plugin
SiteSetting.ai_embeddings_selected_model = vector_def.id
SiteSetting.ai_embeddings_enabled = true
SiteSetting.ai_embeddings_backfill_batch_size = 1

View File

@ -5,6 +5,7 @@ RSpec.describe Jobs::GenerateConceptsFromPopularItems do
fab!(:post) { Fabricate(:post, like_count: 8, post_number: 2) }
before do
enable_current_plugin
SiteSetting.inferred_concepts_enabled = true
SiteSetting.inferred_concepts_daily_topics_limit = 20
SiteSetting.inferred_concepts_daily_posts_limit = 30

View File

@ -2,13 +2,13 @@
describe Jobs::PostLocalizationBackfill do
before do
+enable_current_plugin
SiteSetting.ai_translation_backfill_hourly_rate = 100
SiteSetting.content_localization_supported_locales = "en"
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end
SiteSetting.ai_translation_enabled = true
-SiteSetting.discourse_ai_enabled = true
end
it "does not enqueue post translation when translator disabled" do
@ -36,7 +36,6 @@ describe Jobs::PostLocalizationBackfill do
end
it "does not enqueue post translation if backfill limit is set to 0" do
-SiteSetting.discourse_ai_enabled = true
SiteSetting.ai_translation_enabled = true
SiteSetting.ai_translation_backfill_hourly_rate = 0
@ -46,7 +45,6 @@ describe Jobs::PostLocalizationBackfill do
end
it "enqueues post translation with correct limit" do
-SiteSetting.discourse_ai_enabled = true
SiteSetting.ai_translation_enabled = true
SiteSetting.ai_translation_backfill_hourly_rate = 100

View File

@ -5,7 +5,7 @@ describe Jobs::PostsLocaleDetectionBackfill do
subject(:job) { described_class.new }
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
RSpec.describe Jobs::RemoveOrphanedEmbeddings do
before { enable_current_plugin }
describe "#execute" do
fab!(:embedding_definition)
fab!(:embedding_definition_2) { Fabricate(:embedding_definition) }

View File

@ -3,6 +3,8 @@
require_relative "../../support/sentiment_inference_stubs"
RSpec.describe Jobs::SentimentBackfill do
before { enable_current_plugin }
describe "#execute" do
fab!(:post)

View File

@ -8,6 +8,7 @@ RSpec.describe Jobs::SummariesBackfill do
let(:intervals) { 12 } # budget is split into intervals. Job runs every five minutes.
before do
enable_current_plugin
assign_fake_provider_to(:ai_summarization_model)
SiteSetting.ai_summarization_enabled = true
SiteSetting.ai_summary_backfill_maximum_topics_per_hour = limit

View File

@ -5,7 +5,7 @@ describe Jobs::TopicsLocaleDetectionBackfill do
subject(:job) { described_class.new }
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
Fabricate(:fake_model).tap do |fake_llm|
SiteSetting.public_send("ai_translation_model=", "custom:#{fake_llm.id}")
end

View File

@ -6,13 +6,14 @@ RSpec.describe Jobs::SharedConversationAdjustUploadSecurity do
fab!(:claude_2) { Fabricate(:llm_model, name: "claude-2") }
fab!(:bot_user) do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
toggle_enabled_bots(bots: [claude_2])
SiteSetting.ai_bot_enabled = true
SiteSetting.ai_bot_allowed_groups = "10"
SiteSetting.ai_bot_public_sharing_allowed_groups = "10"
claude_2.reload.user
end
fab!(:user)
fab!(:topic) { Fabricate(:private_message_topic, user: user, recipient: bot_user) }
fab!(:post_1) { Fabricate(:post, topic: topic, user: bot_user) }
@ -23,6 +24,8 @@ RSpec.describe Jobs::SharedConversationAdjustUploadSecurity do
described_class.new.execute(params)
end
before { enable_current_plugin }
context "when conversation is created" do
let(:params) { { conversation_id: conversation.id } }
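Note that this spec also calls the helper inside the fab!(:bot_user) block, not only in a before hook: fab! blocks run ahead of before hooks declared after them, so the plugin presumably has to be enabled before the bot user can be fabricated.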

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
describe DiscourseAi::Completions::AnthropicMessageProcessor do
before { enable_current_plugin }
it "correctly handles and combines partial thinking chunks into complete thinking objects" do
processor =
DiscourseAi::Completions::AnthropicMessageProcessor.new(

View File

@ -3,7 +3,10 @@
describe DiscourseAi::Completions::CancelManager do
fab!(:model) { Fabricate(:anthropic_model, name: "test-model") }
-before { WebMock.allow_net_connect! }
+before do
+  enable_current_plugin
+  WebMock.allow_net_connect!
+end
it "can stop monitoring for cancellation cleanly" do
cancel_manager = DiscourseAi::Completions::CancelManager.new

View File

@ -6,6 +6,8 @@ RSpec.describe DiscourseAi::Completions::Dialects::ChatGpt do
fab!(:llm_model) { Fabricate(:llm_model, max_prompt_tokens: 8192) }
let(:context) { DialectContext.new(described_class, llm_model) }
before { enable_current_plugin }
describe "#translate" do
it "translates a prompt written in our generic format to the ChatGPT format" do
open_ai_version = [

View File

@ -3,9 +3,9 @@
RSpec.describe DiscourseAi::Completions::Dialects::Claude do
fab!(:llm_model) { Fabricate(:anthropic_model, name: "claude-3-opus") }
-let :opus_dialect_klass do
-  DiscourseAi::Completions::Dialects::Dialect.dialect_for(llm_model)
-end
+let(:opus_dialect_klass) { DiscourseAi::Completions::Dialects::Dialect.dialect_for(llm_model) }
+before { enable_current_plugin }
describe "#translate" do
it "can insert OKs to make stuff interleve properly" do

View File

@ -27,6 +27,8 @@ end
RSpec.describe DiscourseAi::Completions::Dialects::Dialect do
fab!(:llm_model)
before { enable_current_plugin }
describe "#translate" do
let(:five_token_msg) { "This represents five tokens." }
let(:tools) do

View File

@ -6,6 +6,8 @@ RSpec.describe DiscourseAi::Completions::Dialects::Gemini do
fab!(:model) { Fabricate(:gemini_model) }
let(:context) { DialectContext.new(described_class, model) }
before { enable_current_plugin }
describe "#translate" do
it "translates a prompt written in our generic format to the Gemini format" do
gemini_version = {

View File

@ -11,6 +11,8 @@ RSpec.describe DiscourseAi::Completions::Dialects::Mistral do
UploadCreator.new(image100x100, "image.jpg").create_for(Discourse.system_user.id)
end
before { enable_current_plugin }
it "does not include user names" do
prompt =
DiscourseAi::Completions::Prompt.new(

View File

@ -5,6 +5,8 @@ RSpec.describe DiscourseAi::Completions::Dialects::Nova do
let(:nova_dialect_klass) { DiscourseAi::Completions::Dialects::Dialect.dialect_for(llm_model) }
before { enable_current_plugin }
it "finds the right dialect" do
expect(nova_dialect_klass).to eq(DiscourseAi::Completions::Dialects::Nova)
end

View File

@ -7,6 +7,8 @@ RSpec.describe DiscourseAi::Completions::Dialects::Ollama do
let(:context) { DialectContext.new(described_class, model) }
let(:dialect_class) { DiscourseAi::Completions::Dialects::Dialect.dialect_for(model) }
before { enable_current_plugin }
describe "#translate" do
context "when native tool support is enabled" do
it "translates a prompt written in our generic format to the Ollama format" do

View File

@ -3,6 +3,8 @@
require_relative "dialect_context"
RSpec.describe DiscourseAi::Completions::Dialects::OllamaTools do
before { enable_current_plugin }
describe "#translated_tools" do
it "translates a tool from our generic format to the Ollama format" do
tool = {

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
RSpec.describe DiscourseAi::Completions::Dialects::OpenAiCompatible do
before { enable_current_plugin }
context "when system prompts are disabled" do
fab!(:model) do
Fabricate(:vllm_model, vision_enabled: true, provider_params: { disable_system_prompt: true })

View File

@ -47,6 +47,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Anthropic do
prompt_with_tools
end
before { enable_current_plugin }
it "does not eat spaces with tool calls" do
body = <<~STRING
event: message_start

View File

@ -26,6 +26,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
Aws::EventStream::Encoder.new.encode(aws_message)
end
before { enable_current_plugin }
it "should provide accurate max token count" do
prompt = DiscourseAi::Completions::Prompt.new("hello")
dialect = DiscourseAi::Completions::Dialects::Claude.new(prompt, model)

View File

@ -58,6 +58,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Cohere do
prompt
end
before { enable_current_plugin }
it "is able to trigger a tool" do
body = (<<~TEXT).strip
{"is_finished":false,"event_type":"stream-start","generation_id":"1648206e-1fe4-4bb6-90cf-360dd55f575b"}

View File

@ -153,6 +153,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Gemini do
}
end
before { enable_current_plugin }
it "correctly configures thinking when enabled" do
model.update!(provider_params: { enable_thinking: "true", thinking_tokens: "10000" })

View File

@ -95,6 +95,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::HuggingFace do
)
end
before { enable_current_plugin }
describe "#perform_completion!" do
context "when using regular mode" do
context "with simple prompts" do

View File

@ -27,6 +27,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::AwsBedrock do
Aws::EventStream::Encoder.new.encode(aws_message)
end
before { enable_current_plugin }
it "should be able to make a simple request" do
proxy = DiscourseAi::Completions::Llm.proxy("custom:#{nova_model.id}")

View File

@ -135,6 +135,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Ollama do
EndpointsCompliance.new(self, endpoint, DiscourseAi::Completions::Dialects::Ollama, user)
end
before { enable_current_plugin }
describe "#perform_completion!" do
context "when using regular mode" do
it "completes a trivial prompt and logs the response" do

View File

@ -30,6 +30,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
prompt
end
before { enable_current_plugin }
it "can perform simple streaming completion" do
response_payload = <<~TEXT
event: response.created

View File

@ -174,6 +174,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenAi do
UploadCreator.new(image100x100, "image.jpg").create_for(Discourse.system_user.id)
end
before { enable_current_plugin }
describe "max tokens for reasoning models" do
it "uses max_completion_tokens for reasoning models" do
model.update!(name: "o3-mini", max_output_tokens: 999)

View File

@ -6,6 +6,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::OpenRouter do
subject(:endpoint) { described_class.new(open_router_model) }
before { enable_current_plugin }
it "supports provider quantization and order selection" do
open_router_model.provider_params["provider_quantizations"] = "int8,int16"
open_router_model.provider_params["provider_order"] = "Google, Amazon Bedrock"

View File

@ -4,6 +4,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::SambaNova do
fab!(:llm_model) { Fabricate(:samba_nova_model) }
let(:llm) { llm_model.to_llm }
before { enable_current_plugin }
it "can stream completions" do
body = <<~PARTS
data: {"id": "4c5e4a44-e847-467d-b9cd-d2f6530678cd", "object": "chat.completion.chunk", "created": 1721336361, "model": "llama3-8b", "system_fingerprint": "fastcoe", "choices": [{"index": 0, "delta": {"content": "I am a bot"}, "logprobs": null, "finish_reason": null}]}

View File

@ -88,6 +88,8 @@ RSpec.describe DiscourseAi::Completions::Endpoints::Vllm do
let(:request_body) { model.default_options.merge(messages: prompt).to_json }
let(:stream_request_body) { model.default_options.merge(messages: prompt, stream: true).to_json }
before { enable_current_plugin }
describe "tool support" do
it "is able to invoke XML tools correctly" do
xml = <<~XML

View File

@ -3,6 +3,8 @@
describe DiscourseAi::Completions::JsonStreamDecoder do
let(:decoder) { DiscourseAi::Completions::JsonStreamDecoder.new }
before { enable_current_plugin }
it "should be able to parse simple messages" do
result = decoder << "data: #{{ hello: "world" }.to_json}"
expect(result).to eq([{ hello: "world" }])

View File

@ -13,6 +13,8 @@ RSpec.describe DiscourseAi::Completions::Llm do
fab!(:user)
fab!(:model) { Fabricate(:llm_model) }
before { enable_current_plugin }
describe ".proxy" do
it "raises an exception when we can't proxy the model" do
fake_model = "unknown:unknown_v2"

View File

@ -14,6 +14,8 @@ describe DiscourseAi::Completions::PromptMessagesBuilder do
Fabricate(:upload, user: user, original_filename: "image.png", extension: "png")
end
before { enable_current_plugin }
it "correctly merges user messages with uploads" do
builder.push(type: :user, content: "Hello", id: "Alice", upload_ids: [1])
builder.push(type: :user, content: "World", id: "Bob", upload_ids: [2])

View File

@ -8,6 +8,8 @@ RSpec.describe DiscourseAi::Completions::Prompt do
let(:username) { "username1" }
let(:image100x100) { plugin_file_from_fixtures("100x100.jpg") }
before { enable_current_plugin }
describe ".new" do
it "raises for invalid attributes" do
expect { described_class.new("a bot", messages: {}) }.to raise_error(ArgumentError)

View File

@ -26,6 +26,8 @@ RSpec.describe DiscourseAi::Completions::StructuredOutput do
)
end
before { enable_current_plugin }
describe "Parsing structured output on the fly" do
it "acts as a buffer for an streamed JSON" do
chunks = [

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
RSpec.describe DiscourseAi::Completions::ToolDefinition do
before { enable_current_plugin }
# Test case 1: Basic tool definition creation
describe "#initialize" do
it "creates a tool with name, description and parameters" do

View File

@ -5,6 +5,8 @@ RSpec.describe DiscourseAi::Completions::UploadEncoder do
let(:jpg) { plugin_file_from_fixtures("1x1.jpg") }
let(:webp) { plugin_file_from_fixtures("1x1.webp") }
before { enable_current_plugin }
it "automatically converts gifs to pngs" do
upload = UploadCreator.new(gif, "1x1.gif").create_for(Discourse.system_user.id)
encoded = described_class.encode(upload_ids: [upload.id], max_pixels: 1_048_576)

View File

@ -3,6 +3,8 @@
describe DiscourseAi::Completions::PromptMessagesBuilder do
let(:tag_stripper) { DiscourseAi::Completions::XmlTagStripper.new(%w[thinking results]) }
before { enable_current_plugin }
it "should strip tags correctly in simple cases" do
result = tag_stripper << "x<thinking>hello</thinki"
expect(result).to eq("x")

View File

@ -3,6 +3,8 @@
RSpec.describe DiscourseAi::Completions::XmlToolProcessor do
let(:processor) { DiscourseAi::Completions::XmlToolProcessor.new }
before { enable_current_plugin }
it "can process simple text" do
result = []
result << (processor << "hello")

View File

@ -12,6 +12,7 @@ RSpec.describe DiscourseAi::Discord::Bot::PersonaReplier do
fab!(:persona) { Fabricate(:ai_persona, default_llm_id: llm_model.id) }
before do
enable_current_plugin
SiteSetting.ai_discord_search_persona = persona.id.to_s
allow_any_instance_of(DiscourseAi::Personas::Bot).to receive(:reply).and_return(
"This is a reply from bot!",

View File

@ -9,6 +9,8 @@ RSpec.describe DiscourseAi::Discord::Bot::Search do
let(:search) { described_class.new(interaction_body) }
before do
enable_current_plugin
stub_request(:post, "https://discord.com/api/webhooks//interaction_token").with(
body:
"{\"content\":\"Here are the top search results for your query:\\n\\n1. [Title](\\u003chttp://test.localhost/link\\u003e)\\n\\n\"}",

View File

@ -3,6 +3,8 @@
require "rails_helper"
RSpec.describe DiscourseAi::Automation do
before { enable_current_plugin }
describe "manually configured model" do
let!(:llm_model) { Fabricate(:llm_model) }
it "returns a list of available models for automation" do

View File

@ -39,6 +39,8 @@ describe DiscourseAi::Automation::LlmPersonaTriage do
end
before do
enable_current_plugin
SiteSetting.ai_bot_enabled = true
SiteSetting.ai_bot_allowed_groups = "#{Group::AUTO_GROUPS[:trust_level_0]}"

View File

@ -21,6 +21,8 @@ describe DiscourseAutomation do
)
end
before { enable_current_plugin }
it "can trigger via automation" do
add_automation_field("sender", user.username, type: "user")
add_automation_field("receivers", [user.username], type: "email_group_user")

View File

@ -52,7 +52,7 @@ RSpec.describe DiscourseAi::Automation::LlmToolTriage do
end
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
SiteSetting.ai_bot_enabled = true
end

View File

@ -24,6 +24,8 @@ describe DiscourseAi::Automation::LlmTriage do
end
before do
enable_current_plugin
SiteSetting.tagging_enabled = true
add_automation_field("system_prompt", "hello %%POST%%")
add_automation_field("search_for_text", "bad")

View File

@ -6,6 +6,7 @@ describe DiscourseAi::GuardianExtensions do
fab!(:topic)
before do
enable_current_plugin
group.add(user)
assign_fake_provider_to(:ai_summarization_model)
SiteSetting.ai_summarization_enabled = true

View File

@ -23,6 +23,8 @@ RSpec.describe DiscourseAi::Inference::CloudflareWorkersAi do
let(:payload) { { text: [content] }.to_json }
before do
enable_current_plugin
stub_request(:post, endpoint).with(body: payload, headers: headers).to_return(
status: response_status,
body: response_body,

View File

@ -11,6 +11,8 @@ RSpec.describe DiscourseAi::InferredConcepts::Applier do
fab!(:llm_model) { Fabricate(:fake_model) }
before do
enable_current_plugin
SiteSetting.inferred_concepts_match_persona = -1
SiteSetting.inferred_concepts_enabled = true
@ -140,7 +142,9 @@ RSpec.describe DiscourseAi::InferredConcepts::Applier do
persona_class_double = double("PersonaClass") # rubocop:disable RSpec/VerifiedDoubles
allow(AiPersona).to receive(:all_personas).and_return([persona_class_double])
-allow(persona_class_double).to receive(:id).and_return(SiteSetting.inferred_concepts_match_persona.to_i)
+allow(persona_class_double).to receive(:id).and_return(
+  SiteSetting.inferred_concepts_match_persona.to_i,
+)
allow(persona_class_double).to receive(:new).and_return(persona_instance_double)
allow(persona_class_double).to receive(:default_llm_id).and_return(llm_model.id)
allow(persona_instance_double).to receive(:class).and_return(persona_class_double)
@ -190,7 +194,9 @@ RSpec.describe DiscourseAi::InferredConcepts::Applier do
persona_class_double = double("PersonaClass") # rubocop:disable RSpec/VerifiedDoubles
allow(AiPersona).to receive(:all_personas).and_return([persona_class_double])
-allow(persona_class_double).to receive(:id).and_return(SiteSetting.inferred_concepts_match_persona.to_i)
+allow(persona_class_double).to receive(:id).and_return(
+  SiteSetting.inferred_concepts_match_persona.to_i,
+)
allow(persona_class_double).to receive(:new).and_return(persona_instance_double)
allow(persona_class_double).to receive(:default_llm_id).and_return(llm_model.id)
allow(persona_instance_double).to receive(:class).and_return(persona_class_double)
@ -228,7 +234,9 @@ RSpec.describe DiscourseAi::InferredConcepts::Applier do
bot_double = instance_spy(DiscourseAi::Personas::Bot)
allow(AiPersona).to receive(:all_personas).and_return([persona_class_double])
-allow(persona_class_double).to receive(:id).and_return(SiteSetting.inferred_concepts_match_persona.to_i)
+allow(persona_class_double).to receive(:id).and_return(
+  SiteSetting.inferred_concepts_match_persona.to_i,
+)
allow(persona_class_double).to receive(:new).and_return(persona_instance_double)
allow(persona_class_double).to receive(:default_llm_id).and_return(llm_model.id)
allow(persona_instance_double).to receive(:class).and_return(persona_class_double)
@ -261,7 +269,9 @@ RSpec.describe DiscourseAi::InferredConcepts::Applier do
bot_double = instance_double("DiscourseAi::Personas::Bot")
allow(AiPersona).to receive(:all_personas).and_return([persona_class_double])
-allow(persona_class_double).to receive(:id).and_return(SiteSetting.inferred_concepts_match_persona.to_i)
+allow(persona_class_double).to receive(:id).and_return(
+  SiteSetting.inferred_concepts_match_persona.to_i,
+)
allow(persona_class_double).to receive(:new).and_return(persona_instance_double)
allow(persona_class_double).to receive(:default_llm_id).and_return(llm_model.id)
allow(persona_instance_double).to receive(:class).and_return(persona_class_double)
@ -283,7 +293,9 @@ RSpec.describe DiscourseAi::InferredConcepts::Applier do
bot_double = instance_double("DiscourseAi::Personas::Bot")
allow(AiPersona).to receive(:all_personas).and_return([persona_class_double])
-allow(persona_class_double).to receive(:id).and_return(SiteSetting.inferred_concepts_match_persona.to_i)
+allow(persona_class_double).to receive(:id).and_return(
+  SiteSetting.inferred_concepts_match_persona.to_i,
+)
allow(persona_class_double).to receive(:new).and_return(persona_instance_double)
allow(persona_class_double).to receive(:default_llm_id).and_return(llm_model.id)
allow(persona_instance_double).to receive(:class).and_return(persona_class_double)
@ -305,7 +317,9 @@ RSpec.describe DiscourseAi::InferredConcepts::Applier do
bot_double = instance_double("DiscourseAi::Personas::Bot")
allow(AiPersona).to receive(:all_personas).and_return([persona_class_double])
-allow(persona_class_double).to receive(:id).and_return(SiteSetting.inferred_concepts_match_persona.to_i)
+allow(persona_class_double).to receive(:id).and_return(
+  SiteSetting.inferred_concepts_match_persona.to_i,
+)
allow(persona_class_double).to receive(:new).and_return(persona_instance_double)
allow(persona_class_double).to receive(:default_llm_id).and_return(llm_model.id)
allow(persona_instance_double).to receive(:class).and_return(persona_class_double)

View File

@ -10,6 +10,7 @@ RSpec.describe DiscourseAi::InferredConcepts::Finder do
fab!(:llm_model) { Fabricate(:fake_model) }
before do
enable_current_plugin
SiteSetting.inferred_concepts_generate_persona = -1
SiteSetting.inferred_concepts_deduplicate_persona = -1
SiteSetting.inferred_concepts_enabled = true

View File

@ -8,6 +8,8 @@ RSpec.describe DiscourseAi::InferredConcepts::Manager do
fab!(:concept1) { Fabricate(:inferred_concept, name: "programming") }
fab!(:concept2) { Fabricate(:inferred_concept, name: "testing") }
before { enable_current_plugin }
describe "#list_concepts" do
it "returns all concepts sorted by name" do
concepts = manager.list_concepts

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
RSpec.describe DiscourseAi::AiBot::EntryPoint do
before { enable_current_plugin }
describe "#inject_into" do
describe "subscribes to the post_created event" do
fab!(:admin)

View File

@ -2,7 +2,9 @@
RSpec.describe Jobs::CreateAiReply do
fab!(:gpt_35_bot) { Fabricate(:llm_model, name: "gpt-3.5-turbo") }
before do
enable_current_plugin
SiteSetting.ai_bot_enabled = true
toggle_enabled_bots(bots: [gpt_35_bot])
end

View File

@ -14,6 +14,7 @@ RSpec.describe DiscourseAi::AiBot::Playground do
fab!(:opus_model) { Fabricate(:anthropic_model) }
fab!(:bot_user) do
enable_current_plugin
toggle_enabled_bots(bots: [claude_2])
SiteSetting.ai_bot_enabled = true
claude_2.reload.user
@ -58,7 +59,10 @@ RSpec.describe DiscourseAi::AiBot::Playground do
)
end
-before { SiteSetting.ai_embeddings_enabled = false }
+before do
+  enable_current_plugin
+  SiteSetting.ai_embeddings_enabled = false
+end
after do
# we must reset cache on persona cause data can be rolled back

View File

@ -9,7 +9,7 @@ describe DiscourseAi::AiBot::SiteSettingsExtension do
DiscourseAi::AiBot::EntryPoint.find_user_from_model(model).present?
end
-before { SiteSetting.discourse_ai_enabled = true }
+before { enable_current_plugin }
it "correctly creates/deletes bot accounts as needed" do
SiteSetting.ai_bot_enabled = true

View File

@ -5,6 +5,7 @@ RSpec.describe DiscourseAi::AiHelper::Assistant do
fab!(:empty_locale_user) { Fabricate(:user, locale: "") }
before do
enable_current_plugin
assign_fake_provider_to(:ai_helper_model)
Group.refresh_automatic_groups!
end

View File

@ -3,12 +3,15 @@
RSpec.describe DiscourseAi::AiHelper::ChatThreadTitler do
subject(:titler) { described_class.new(thread) }
-before { assign_fake_provider_to(:ai_helper_model) }
fab!(:thread) { Fabricate(:chat_thread) }
fab!(:chat_message) { Fabricate(:chat_message, thread: thread) }
fab!(:user)
+before do
+  enable_current_plugin
+  assign_fake_provider_to(:ai_helper_model)
+end
describe "#suggested_title" do
it "bails early if thread has no content" do
empty_thread = Chat::Thread.new

View File

@ -6,6 +6,8 @@ RSpec.describe DiscourseAi::AiHelper::DateFormatter do
# Reference time is Tuesday Jan 16th, 2024 at 2:30 PM Sydney time
let(:sydney_reference) { DateTime.parse("2024-01-16 14:30:00 +11:00") }
before { enable_current_plugin }
describe ".process_date_placeholders" do
describe "with Sydney timezone" do
before do

View File

@ -4,6 +4,8 @@ describe DiscourseAi::AiHelper::EntryPoint do
fab!(:english_user) { Fabricate(:user) }
fab!(:french_user) { Fabricate(:user, locale: "fr") }
before { enable_current_plugin }
it "will correctly localize available prompts" do
assign_fake_provider_to(:ai_helper_model)
SiteSetting.default_locale = "en"

View File

@ -6,6 +6,7 @@ RSpec.describe DiscourseAi::AiHelper::Painter do
fab!(:user)
before do
enable_current_plugin
assign_fake_provider_to(:ai_helper_model)
SiteSetting.ai_stability_api_url = "https://api.stability.dev"
SiteSetting.ai_stability_api_key = "abc"

View File

@ -20,6 +20,8 @@ RSpec.describe DiscourseAi::AiHelper::SemanticCategorizer do
let(:expected_embedding) { [0.0038493] * vector.vdef.dimensions }
before do
enable_current_plugin
SiteSetting.ai_embeddings_selected_model = vector_def.id
SiteSetting.ai_embeddings_enabled = true

View File

@ -19,7 +19,7 @@ RSpec.describe DiscourseAi::AiModeration::SpamScanner do
end
before do
-SiteSetting.discourse_ai_enabled = true
+enable_current_plugin
SiteSetting.ai_spam_detection_enabled = true
end

View File

@ -8,6 +8,8 @@ describe DiscourseAi::Automation::LlmTriage do
DiscourseAi::Automation::LlmTriage.handle(**args)
end
before { enable_current_plugin }
it "does nothing if it does not pass triage" do
DiscourseAi::Completions::Llm.with_prepared_responses(["good"]) do
triage(

View File

@ -43,6 +43,8 @@ module DiscourseAi
fab!(:post_with_likes3) { Fabricate(:post, topic: topic_with_likes, like_count: 3) }
before { enable_current_plugin }
if defined?(::DiscourseSolved)
it "will correctly denote solved topics" do
Fabricate(:solved_topic, topic: topic_with_likes, answer_post: post_with_likes2)

View File

@ -34,6 +34,8 @@ module DiscourseAi
fab!(:llm_model)
before { enable_current_plugin }
describe "#run!" do
it "is able to generate email reports" do
freeze_time

View File

@ -5,7 +5,10 @@ describe DiscourseAi::Embeddings::EntryPoint do
fab!(:embedding_definition)
-before { SiteSetting.ai_embeddings_selected_model = embedding_definition.id }
+before do
+  enable_current_plugin
+  SiteSetting.ai_embeddings_selected_model = embedding_definition.id
+end
describe "registering event callbacks" do
context "when creating a topic" do

View File

@ -5,6 +5,8 @@ RSpec.describe Jobs::GenerateEmbeddings do
fab!(:vector_def) { Fabricate(:embedding_definition) }
before { enable_current_plugin }
describe "#execute" do
before do
SiteSetting.ai_embeddings_selected_model = vector_def.id

View File

@ -8,9 +8,11 @@ RSpec.describe DiscourseAi::Embeddings::Schema do
fab!(:post) { Fabricate(:post, post_number: 1) }
let(:digest) { OpenSSL::Digest.hexdigest("SHA1", "test") }
-before { SiteSetting.ai_embeddings_selected_model = vector_def.id }
-before { posts_schema.store(post, embeddings, digest) }
+before do
+  enable_current_plugin
+  SiteSetting.ai_embeddings_selected_model = vector_def.id
+  posts_schema.store(post, embeddings, digest)
+end
describe "#find_by_target" do
it "gets you the post_id of the record that matches the post" do

View File

@ -16,6 +16,7 @@ describe DiscourseAi::Embeddings::SemanticRelated do
fab!(:vector_def) { Fabricate(:embedding_definition) }
before do
enable_current_plugin
SiteSetting.ai_embeddings_semantic_related_topics_enabled = true
SiteSetting.ai_embeddings_selected_model = vector_def.id
SiteSetting.ai_embeddings_enabled = true

View File

@ -10,6 +10,7 @@ RSpec.describe DiscourseAi::Embeddings::SemanticSearch do
fab!(:vector_def) { Fabricate(:embedding_definition) }
before do
enable_current_plugin
SiteSetting.ai_embeddings_selected_model = vector_def.id
assign_fake_provider_to(:ai_embeddings_semantic_search_hyde_model)
end

View File

@ -3,6 +3,8 @@
describe DiscourseAi::Embeddings::EntryPoint do
fab!(:user)
before { enable_current_plugin }
describe "SemanticTopicQuery extension" do
describe "#list_semantic_related_topics" do
subject(:topic_query) { DiscourseAi::Embeddings::SemanticTopicQuery.new(user) }

View File

@ -6,6 +6,8 @@ RSpec.describe DiscourseAi::Embeddings::Strategies::Truncation do
fab!(:open_ai_embedding_def)
let(:prefix) { "I come first:" }
before { enable_current_plugin }
describe "#prepare_target_text" do
before { SiteSetting.max_post_length = 100_000 }

View File

@ -1,6 +1,8 @@
# frozen_string_literal: true
RSpec.describe DiscourseAi::Embeddings::Vector do
before { enable_current_plugin }
shared_examples "generates and store embeddings using a vector definition" do
subject(:vector) { described_class.new(vdef) }

Some files were not shown because too many files have changed in this diff.