DEV: Add spam

Keegan George 2025-06-09 12:32:27 -07:00
parent 0fc854dec4
commit c3cd5d65da
No known key found for this signature in database
GPG Key ID: 91B40E38537AC000
3 changed files with 110 additions and 1 deletion

View File

@@ -10,6 +10,11 @@ module DiscourseAi
      end

      def update
        # Get the initial settings for logging changes
        initial_settings = AiModerationSetting.spam
        initial_custom_instructions = initial_settings&.data&.dig("custom_instructions")
        initial_llm_model_id = initial_settings&.llm_model_id

        updated_params = {}
        if allowed_params.key?(:llm_model_id)
          llm_model_id = updated_params[:llm_model_id] = allowed_params[:llm_model_id]
@@ -36,6 +41,9 @@ module DiscourseAi
          else
            AiModerationSetting.create!(updated_params.merge(setting_type: :spam))
          end

          # Log any changes to custom_instructions or llm_model_id
          log_ai_spam_update(initial_llm_model_id, initial_custom_instructions, allowed_params)
        end

        is_enabled = ActiveModel::Type::Boolean.new.cast(allowed_params[:is_enabled])
@@ -113,6 +121,34 @@ module DiscourseAi
      private

      def log_ai_spam_update(initial_llm_model_id, initial_custom_instructions, params)
        # Track changes for logging
        changes_to_log = {}

        # Only track llm_model_id changes when it's in the params AND has actually changed
        if params.key?(:llm_model_id) && initial_llm_model_id.to_s != params[:llm_model_id].to_s
          # Get model names for better logging
          old_model_name = LlmModel.find_by(id: initial_llm_model_id)&.display_name || initial_llm_model_id
          new_model_name = LlmModel.find_by(id: params[:llm_model_id])&.display_name || params[:llm_model_id]

          changes_to_log[:llm_model_id] = "#{old_model_name} → #{new_model_name}"
        end

        # Only track custom_instructions changes when it's in the params AND has actually changed
        if params.key?(:custom_instructions) && initial_custom_instructions != params[:custom_instructions]
          changes_to_log[:custom_instructions] = params[:custom_instructions]
        end

        # Log the changes if any were made to llm_model_id or custom_instructions
        if changes_to_log.present?
          # Log the changes using StaffActionLogger (without a subject as requested)
          StaffActionLogger.new(current_user).log_custom(
            "update_ai_spam_settings",
            changes_to_log
          )
        end
      end

      def allowed_params
        params.permit(:is_enabled, :llm_model_id, :custom_instructions)
      end

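For reference, a minimal sketch of how the entry written by log_ai_spam_update could be inspected from a Rails console. It assumes Discourse core's StaffActionLogger#log_custom(custom_type, details_hash), which stores the entry as a UserHistory row with action :custom_staff; the acting user and the example values below are illustrative only.

# Minimal sketch; assumes StaffActionLogger#log_custom(custom_type, details_hash)
# from Discourse core. Values are illustrative.
logger = StaffActionLogger.new(Discourse.system_user)
logger.log_custom(
  "update_ai_spam_settings",
  { llm_model_id: "Old Model → New Model", custom_instructions: "flag posts that only contain shortened links" },
)

entry =
  UserHistory.where(
    action: UserHistory.actions[:custom_staff],
    custom_type: "update_ai_spam_settings",
  ).last

# Hash keys that are not reserved UserHistory columns are typically folded
# into the free-form details text, one "key: value" line per entry.
puts entry.details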
View File

@@ -39,6 +39,7 @@ en:
          create_ai_embedding: "Create AI embedding"
          update_ai_embedding: "Update AI embedding"
          delete_ai_embedding: "Delete AI embedding"
          update_ai_spam_settings: "Update AI spam settings"
  js:
    discourse_automation:

View File

@@ -119,6 +119,78 @@ RSpec.describe DiscourseAi::Admin::AiSpamController do
          "custom instructions new",
        )
      end

      it "logs staff action when custom_instructions change" do
        put "/admin/plugins/discourse-ai/ai-spam.json",
            params: {
              is_enabled: true,
              llm_model_id: llm_model.id,
              custom_instructions: "updated instructions"
            }

        expect(response.status).to eq(200)

        # Verify the log was created with the right subject
        history = UserHistory.where(action: UserHistory.actions[:custom_staff], custom_type: "update_ai_spam_settings").last
        expect(history).to be_present
        expect(history.subject).to eq("AI Spam Detection")
        expect(history.details).to include("custom_instructions_changed")
      end

      it "logs staff action when llm_model_id changes" do
        # Create another model to change to
        new_llm_model = Fabricate(:llm_model, name: "New Test Model", display_name: "New Test Model")

        put "/admin/plugins/discourse-ai/ai-spam.json",
            params: {
              llm_model_id: new_llm_model.id
            }

        expect(response.status).to eq(200)

        # Verify the log was created with the right subject
        history = UserHistory.where(action: UserHistory.actions[:custom_staff], custom_type: "update_ai_spam_settings").last
        expect(history).to be_present
        expect(history.subject).to eq("AI Spam Detection")
        expect(history.details).to include("llm_model_id")
      end

      it "does not log staff action when only is_enabled changes" do
        # Check initial count of logs
        initial_count = UserHistory.where(action: UserHistory.actions[:custom_staff], custom_type: "update_ai_spam_settings").count

        # Update only the is_enabled setting
        put "/admin/plugins/discourse-ai/ai-spam.json",
            params: {
              is_enabled: false
            }

        expect(response.status).to eq(200)

        # Verify no new log was created
        current_count = UserHistory.where(action: UserHistory.actions[:custom_staff], custom_type: "update_ai_spam_settings").count
        expect(current_count).to eq(initial_count)
      end

      it "logs both custom_instructions and llm_model_id changes in one entry" do
        # Create another model to change to
        new_llm_model = Fabricate(:llm_model, name: "Another Test Model", display_name: "Another Test Model")

        put "/admin/plugins/discourse-ai/ai-spam.json",
            params: {
              llm_model_id: new_llm_model.id,
              custom_instructions: "new instructions for both changes"
            }

        expect(response.status).to eq(200)

        # Verify the log was created with all changes
        history = UserHistory.where(action: UserHistory.actions[:custom_staff], custom_type: "update_ai_spam_settings").last
        expect(history).to be_present
        expect(history.subject).to eq("AI Spam Detection")
        expect(history.details).to include("llm_model_id")
        expect(history.details).to include("custom_instructions_changed")
      end
    end
  end
end
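The "does not log" example above could also be expressed with RSpec's change matcher instead of comparing counts by hand; a minimal sketch, assuming the same route and fabricators used in the spec:

it "does not log staff action when only is_enabled changes" do
  expect {
    put "/admin/plugins/discourse-ai/ai-spam.json", params: { is_enabled: false }
  }.not_to change {
    UserHistory.where(
      action: UserHistory.actions[:custom_staff],
      custom_type: "update_ai_spam_settings",
    ).count
  }
end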