// sojorn/_legacy/supabase/functions/tone-check/index.ts
/// <reference types="https://deno.land/x/deno@v1.28.0/cli/dts/lib.deno.d.ts" />
import { serve } from 'https://deno.land/std@0.168.0/http/server.ts'
const OPENAI_MODERATION_URL = 'https://api.openai.com/v1/moderations'
const ALLOWED_ORIGIN = Deno.env.get('ALLOWED_ORIGIN') || 'https://sojorn.net'
const corsHeaders = {
  'Access-Control-Allow-Origin': ALLOWED_ORIGIN,
  'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type',
}
type ModerationCategory = 'bigotry' | 'nsfw' | 'violence'
interface ModerationResult {
  flagged: boolean
  category?: ModerationCategory
  flags: string[]
  reason: string
}
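
// Example response bodies (a sketch based on the handlers below; `flags`
// entries are raw OpenAI category keys when the Moderation API is used, or
// the local labels 'hate-speech' / 'harassment' from the keyword fallback):
//   { "flagged": false, "flags": [], "reason": "Content approved" }
//   { "flagged": true, "category": "bigotry", "flags": ["harassment"],
//     "reason": "This content appears to be harassing or attacking someone." }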
// Basic keyword-based fallback (when OpenAI is unavailable)
function basicModeration(text: string): ModerationResult {
  const lowerText = text.toLowerCase()
  const flags: string[] = []
  // Slurs and hate speech patterns (basic detection)
  const slurPatterns = [
    /\bn+[i1]+g+[aegr]+/i,
    /\bf+[a4]+g+[s$o0]+t/i,
    /\br+[e3]+t+[a4]+r+d/i,
    // Add more patterns as needed
  ]
  for (const pattern of slurPatterns) {
    if (pattern.test(text)) {
      return {
        flagged: true,
        category: 'bigotry',
        flags: ['hate-speech'],
        reason: 'This content contains hate speech or slurs.',
      }
    }
  }
  // Targeted profanity/attacks
  const attackPatterns = [
    /\b(fuck|screw|damn)\s+(you|u|your|ur)\b/i,
    /\byou('re| are)\s+(a |an )?(fucking |damn |stupid |idiot|moron|dumb)/i,
    /\b(kill|hurt|attack|destroy)\s+(you|yourself)\b/i,
    /\byou\s+should\s+(die|kill|hurt)/i,
  ]
  for (const pattern of attackPatterns) {
    if (pattern.test(text)) {
      flags.push('harassment')
      return {
        flagged: true,
        category: 'bigotry',
        flags,
        reason: 'This content appears to be harassing or attacking someone.',
      }
    }
  }
  // Positive indicators
  const positiveWords = ['thank', 'appreciate', 'love', 'support', 'grateful', 'amazing', 'wonderful']
  const hasPositive = positiveWords.some(word => lowerText.includes(word))
  if (hasPositive) {
    return {
      flagged: false,
      flags: [],
      reason: 'Content approved',
    }
  }
  // Default: allow
  return {
    flagged: false,
    flags: [],
    reason: 'Content approved',
  }
}
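
// Quick sanity checks for the fallback (expected outputs, assuming the
// patterns above are unchanged):
//   basicModeration('thanks, I appreciate you')
//     → { flagged: false, flags: [], reason: 'Content approved' }
//   basicModeration('screw you')
//     → { flagged: true, category: 'bigotry', flags: ['harassment'],
//         reason: 'This content appears to be harassing or attacking someone.' }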
serve(async (req: Request) => {
  // Handle CORS preflight
  if (req.method === 'OPTIONS') {
    return new Response('ok', {
      headers: {
        ...corsHeaders,
        'Access-Control-Allow-Methods': 'POST, OPTIONS',
      },
    })
  }
  try {
    const { text, imageUrl } = await req.json() as { text: string; imageUrl?: string }
    if (!text || text.trim().length === 0) {
      return new Response(
        JSON.stringify({ error: 'Text is required' }),
        { status: 400, headers: { ...corsHeaders, 'Content-Type': 'application/json' } }
      )
    }
    const openAiKey = Deno.env.get('OPEN_AI')
    // Try OpenAI Moderation API first (if key available)
    if (openAiKey) {
      try {
        console.log('Attempting OpenAI moderation check...')
        const input: Array<{ type: 'text'; text: string } | { type: 'image_url'; image_url: { url: string } }> = [
          { type: 'text', text },
        ]
        if (imageUrl) {
          input.push({
            type: 'image_url',
            image_url: { url: imageUrl },
          })
        }
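
        // The resulting request body sent to the Moderation API looks like
        // this (the image part is present only when imageUrl is provided):
        //   {
        //     "input": [
        //       { "type": "text", "text": "..." },
        //       { "type": "image_url", "image_url": { "url": "https://..." } }
        //     ],
        //     "model": "omni-moderation-latest"
        //   }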
        const moderationResponse = await fetch(OPENAI_MODERATION_URL, {
          method: 'POST',
          headers: {
            'Authorization': `Bearer ${openAiKey}`,
            'Content-Type': 'application/json',
          },
          body: JSON.stringify({
            input,
            model: 'omni-moderation-latest',
          }),
        })
        if (moderationResponse.ok) {
          const moderationData = await moderationResponse.json()
          const results = moderationData.results?.[0]
          if (results) {
            const categories = results.categories || {}
            const flags = Object.entries(categories)
              .filter(([, value]) => value === true)
              .map(([key]) => key)
            const isHate = categories.hate || categories['hate/threatening']
            const isHarassment = categories.harassment || categories['harassment/threatening']
            const isSexual = categories.sexual || categories['sexual/minors']
            const isViolence = categories.violence || categories['violence/graphic']
            let category: ModerationCategory | undefined
            let reason = 'Content approved'
            const flagged = Boolean(isHate || isHarassment || isSexual || isViolence)
            if (flagged) {
              if (isHate || isHarassment) {
                category = 'bigotry'
                reason = 'Potential hate or harassment detected.'
              } else if (isSexual) {
                category = 'nsfw'
                reason = 'Potential sexual content detected.'
              } else if (isViolence) {
                category = 'violence'
                reason = 'Potential violent content detected.'
              }
            }
            console.log('OpenAI moderation successful:', { flagged, category })
            return new Response(JSON.stringify({ flagged, category, flags, reason }), {
              headers: { ...corsHeaders, 'Content-Type': 'application/json' },
            })
          }
        } else {
          const errorText = await moderationResponse.text()
          console.error('OpenAI API error:', moderationResponse.status, errorText)
        }
      } catch (error) {
        console.error('OpenAI moderation failed:', error)
      }
    }
    // Fall back to basic keyword moderation
    console.log('Using basic keyword moderation (OpenAI unavailable)')
    const result = basicModeration(text)
    return new Response(JSON.stringify(result), {
      headers: { ...corsHeaders, 'Content-Type': 'application/json' },
    })
  } catch (e) {
    console.error('Error in tone-check function:', e)
    // Fail CLOSED: reject content when moderation itself fails
    return new Response(
      JSON.stringify({
        flagged: true,
        category: null,
        flags: [],
        reason: 'Content moderation is temporarily unavailable. Please try again later.',
      }),
      { status: 503, headers: { ...corsHeaders, 'Content-Type': 'application/json' } }
    )
  }
})
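
// Example client-side call (a sketch -- the project ref and anon key are
// placeholders; Supabase Edge Functions are conventionally served at
// /functions/v1/<name>):
//
//   const res = await fetch('https://<project-ref>.supabase.co/functions/v1/tone-check', {
//     method: 'POST',
//     headers: {
//       'Authorization': `Bearer ${SUPABASE_ANON_KEY}`,
//       'Content-Type': 'application/json',
//     },
//     body: JSON.stringify({ text: 'thank you so much!' }),
//   })
//   const { flagged, category, flags, reason } = await res.json()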