NSFW content system: blur overlay, user toggle, AI tri-state (clean/nsfw/flag), feed filtering

This commit is contained in:
Patrick Britton 2026-02-06 20:42:23 -06:00
parent e81e9e52b7
commit 256592379a
13 changed files with 294 additions and 53 deletions

View file

@ -364,16 +364,19 @@ function ConfigEditor({ moderationType, config, onSaved }: {
{!modelId && <p className="text-xs text-amber-600">Select and save a model first to test</p>}
{testResult && (
<div className={`p-4 rounded-lg text-sm ${testResult.error ? 'bg-red-50 text-red-700' : testResult.flagged ? 'bg-red-50' : 'bg-green-50'}`}>
<div className={`p-4 rounded-lg text-sm ${testResult.error ? 'bg-red-50 text-red-700' : testResult.action === 'flag' ? 'bg-red-50' : testResult.action === 'nsfw' ? 'bg-amber-50' : 'bg-green-50'}`}>
{testResult.error ? (
<p>{testResult.error}</p>
) : (
<div className="space-y-3">
{/* Verdict */}
<div className="flex items-center gap-2">
<span className={`text-lg font-bold ${testResult.flagged ? 'text-red-700' : 'text-green-700'}`}>
{testResult.flagged ? '⛔ FLAGGED' : '✅ CLEAN'}
<div className="flex items-center gap-2 flex-wrap">
<span className={`text-lg font-bold ${testResult.action === 'flag' ? 'text-red-700' : testResult.action === 'nsfw' ? 'text-amber-700' : 'text-green-700'}`}>
{testResult.action === 'flag' ? '⛔ FLAGGED' : testResult.action === 'nsfw' ? '⚠️ NSFW' : '✅ CLEAN'}
</span>
{testResult.nsfw_reason && (
<span className="text-xs font-medium bg-amber-200 text-amber-800 px-2 py-0.5 rounded-full">{testResult.nsfw_reason}</span>
)}
{testResult.reason && <span className="text-gray-600"> {testResult.reason}</span>}
</div>

View file

@ -131,7 +131,7 @@ func main() {
wsHandler := handlers.NewWSHandler(hub, cfg.JWTSecret)
userHandler := handlers.NewUserHandler(userRepo, postRepo, notificationService, assetService)
postHandler := handlers.NewPostHandler(postRepo, userRepo, feedService, assetService, notificationService, moderationService, contentFilter)
postHandler := handlers.NewPostHandler(postRepo, userRepo, feedService, assetService, notificationService, moderationService, contentFilter, openRouterService)
chatHandler := handlers.NewChatHandler(chatRepo, notificationService, hub)
authHandler := handlers.NewAuthHandler(userRepo, cfg, emailService, sendPulseService)
categoryHandler := handlers.NewCategoryHandler(categoryRepo)

View file

@ -23,9 +23,10 @@ type PostHandler struct {
notificationService *services.NotificationService
moderationService *services.ModerationService
contentFilter *services.ContentFilter
openRouterService *services.OpenRouterService
}
func NewPostHandler(postRepo *repository.PostRepository, userRepo *repository.UserRepository, feedService *services.FeedService, assetService *services.AssetService, notificationService *services.NotificationService, moderationService *services.ModerationService, contentFilter *services.ContentFilter) *PostHandler {
func NewPostHandler(postRepo *repository.PostRepository, userRepo *repository.UserRepository, feedService *services.FeedService, assetService *services.AssetService, notificationService *services.NotificationService, moderationService *services.ModerationService, contentFilter *services.ContentFilter, openRouterService *services.OpenRouterService) *PostHandler {
return &PostHandler{
postRepo: postRepo,
userRepo: userRepo,
@ -34,6 +35,7 @@ func NewPostHandler(postRepo *repository.PostRepository, userRepo *repository.Us
notificationService: notificationService,
moderationService: moderationService,
contentFilter: contentFilter,
openRouterService: openRouterService,
}
}
@ -324,6 +326,28 @@ func (h *PostHandler) CreatePost(c *gin.Context) {
}
}
// 5b. OpenRouter AI Moderation — NSFW vs Flag decision
if h.openRouterService != nil {
orResult, orErr := h.openRouterService.ModerateText(c.Request.Context(), req.Body)
if orErr == nil && orResult != nil {
switch orResult.Action {
case "nsfw":
post.IsNSFW = true
post.NSFWReason = orResult.NSFWReason
if post.Status != "pending_moderation" {
post.Status = "active" // NSFW posts are active but blurred
}
case "flag":
post.Status = "pending_moderation"
}
// Update CIS from OpenRouter scores if available
if orResult.Hate > 0 || orResult.Greed > 0 || orResult.Delusion > 0 {
orCis := 1.0 - (orResult.Hate+orResult.Greed+orResult.Delusion)/3.0
post.CISScore = &orCis
}
}
}
// Create post
err = h.postRepo.CreatePost(c.Request.Context(), post)
if err != nil {
@ -375,7 +399,13 @@ func (h *PostHandler) GetFeed(c *gin.Context) {
category := c.Query("category")
hasVideo := c.Query("has_video") == "true"
posts, err := h.feedService.GetFeed(c.Request.Context(), userIDStr.(string), category, hasVideo, limit, offset)
// Check user's NSFW preference
showNSFW := false
if settings, err := h.userRepo.GetUserSettings(c.Request.Context(), userIDStr.(string)); err == nil && settings.NSFWEnabled != nil {
showNSFW = *settings.NSFWEnabled
}
posts, err := h.feedService.GetFeed(c.Request.Context(), userIDStr.(string), category, hasVideo, limit, offset, showNSFW)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to fetch feed", "details": err.Error()})
return

View file

@ -31,6 +31,8 @@ type Post struct {
AllowChain bool `json:"allow_chain" db:"allow_chain"`
ChainParentID *uuid.UUID `json:"chain_parent_id" db:"chain_parent_id"`
Visibility string `json:"visibility" db:"visibility"`
IsNSFW bool `json:"is_nsfw" db:"is_nsfw"`
NSFWReason string `json:"nsfw_reason" db:"nsfw_reason"`
ExpiresAt *time.Time `json:"expires_at" db:"expires_at"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
EditedAt *time.Time `json:"edited_at,omitempty" db:"edited_at"`

View file

@ -30,5 +30,6 @@ type UserSettings struct {
AutoPlayVideos *bool `json:"auto_play_videos" db:"auto_play_videos"`
DataSaverMode *bool `json:"data_saver_mode" db:"data_saver_mode"`
DefaultPostTtl *int `json:"default_post_ttl" db:"default_post_ttl"`
NSFWEnabled *bool `json:"nsfw_enabled" db:"nsfw_enabled"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}

View file

@ -37,14 +37,16 @@ func (r *PostRepository) CreatePost(ctx context.Context, post *models.Post) erro
author_id, category_id, body, status, tone_label, cis_score,
image_url, video_url, thumbnail_url, duration_ms, body_format, background_id, tags,
is_beacon, beacon_type, location, confidence_score,
is_active_beacon, allow_chain, chain_parent_id, visibility, expires_at
is_active_beacon, allow_chain, chain_parent_id, visibility, expires_at,
is_nsfw, nsfw_reason
) VALUES (
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13,
$14, $15,
CASE WHEN ($16::double precision) IS NOT NULL AND ($17::double precision) IS NOT NULL
THEN ST_SetSRID(ST_MakePoint(($17::double precision), ($16::double precision)), 4326)::geography
ELSE NULL END,
$18, $19, $20, $21, $22, $23
$18, $19, $20, $21, $22, $23,
$24, $25
) RETURNING id, created_at
`
@ -59,6 +61,7 @@ func (r *PostRepository) CreatePost(ctx context.Context, post *models.Post) erro
post.ImageURL, post.VideoURL, post.ThumbnailURL, post.DurationMS, post.BodyFormat, post.BackgroundID, post.Tags,
post.IsBeacon, post.BeaconType, post.Lat, post.Long, post.Confidence,
post.IsActiveBeacon, post.AllowChain, post.ChainParentID, post.Visibility, post.ExpiresAt,
post.IsNSFW, post.NSFWReason,
).Scan(&post.ID, &post.CreatedAt)
if err != nil {
@ -120,7 +123,7 @@ func (r *PostRepository) GetRandomSponsoredPost(ctx context.Context, userID stri
return &p, nil
}
func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlug string, hasVideo bool, limit int, offset int) ([]models.Post, error) {
func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlug string, hasVideo bool, limit int, offset int, showNSFW bool) ([]models.Post, error) {
query := `
SELECT
p.id, p.author_id, p.category_id, p.body,
@ -139,7 +142,9 @@ func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlu
CASE WHEN ($4::text) != '' THEN EXISTS(SELECT 1 FROM public.post_likes WHERE post_id = p.id AND user_id = $4::text::uuid) ELSE FALSE END as is_liked,
p.allow_chain, p.visibility,
COALESCE((SELECT jsonb_object_agg(emoji, count) FROM (SELECT emoji, COUNT(*) as count FROM public.post_reactions WHERE post_id = p.id GROUP BY emoji) r), '{}'::jsonb) as reaction_counts,
CASE WHEN ($4::text) != '' THEN COALESCE((SELECT jsonb_agg(emoji) FROM public.post_reactions WHERE post_id = p.id AND user_id = $4::text::uuid), '[]'::jsonb) ELSE '[]'::jsonb END as my_reactions
CASE WHEN ($4::text) != '' THEN COALESCE((SELECT jsonb_agg(emoji) FROM public.post_reactions WHERE post_id = p.id AND user_id = $4::text::uuid), '[]'::jsonb) ELSE '[]'::jsonb END as my_reactions,
COALESCE(p.is_nsfw, FALSE) as is_nsfw,
COALESCE(p.nsfw_reason, '') as nsfw_reason
FROM public.posts p
JOIN public.profiles pr ON p.author_id = pr.id
LEFT JOIN public.post_metrics m ON p.id = m.post_id
@ -156,10 +161,11 @@ func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlu
AND NOT public.has_block_between(p.author_id, CASE WHEN $4::text != '' THEN $4::text::uuid ELSE NULL END)
AND ($3 = FALSE OR (COALESCE(p.video_url, '') <> '' OR (COALESCE(p.image_url, '') ILIKE '%.mp4')))
AND ($5 = '' OR c.slug = $5)
AND ($6 = TRUE OR COALESCE(p.is_nsfw, FALSE) = FALSE)
ORDER BY p.created_at DESC
LIMIT $1 OFFSET $2
`
rows, err := r.pool.Query(ctx, query, limit, offset, hasVideo, userID, categorySlug)
rows, err := r.pool.Query(ctx, query, limit, offset, hasVideo, userID, categorySlug, showNSFW)
if err != nil {
return nil, err
}
@ -173,6 +179,7 @@ func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlu
&p.AuthorHandle, &p.AuthorDisplayName, &p.AuthorAvatarURL,
&p.LikeCount, &p.CommentCount, &p.IsLiked,
&p.AllowChain, &p.Visibility, &p.Reactions, &p.MyReactions,
&p.IsNSFW, &p.NSFWReason,
)
if err != nil {
return nil, err

View file

@ -727,7 +727,7 @@ func (r *UserRepository) GetUserSettings(ctx context.Context, userID string) (*m
query := `
SELECT user_id, theme, language, notifications_enabled, email_notifications,
push_notifications, content_filter_level, auto_play_videos, data_saver_mode,
default_post_ttl, updated_at
default_post_ttl, COALESCE(nsfw_enabled, FALSE), updated_at
FROM public.user_settings
WHERE user_id = $1::uuid
`
@ -735,7 +735,7 @@ func (r *UserRepository) GetUserSettings(ctx context.Context, userID string) (*m
err := r.pool.QueryRow(ctx, query, userID).Scan(
&us.UserID, &us.Theme, &us.Language, &us.NotificationsEnabled, &us.EmailNotifications,
&us.PushNotifications, &us.ContentFilterLevel, &us.AutoPlayVideos, &us.DataSaverMode,
&us.DefaultPostTtl, &us.UpdatedAt,
&us.DefaultPostTtl, &us.NSFWEnabled, &us.UpdatedAt,
)
if err != nil {
if err.Error() == "no rows in result set" || err.Error() == "pgx: no rows in result set" {
@ -756,6 +756,7 @@ func (r *UserRepository) GetUserSettings(ctx context.Context, userID string) (*m
ContentFilterLevel: &med,
AutoPlayVideos: &t,
DataSaverMode: &f,
NSFWEnabled: &f,
UpdatedAt: time.Now(),
}, nil
}
@ -769,8 +770,8 @@ func (r *UserRepository) UpdateUserSettings(ctx context.Context, us *models.User
INSERT INTO public.user_settings (
user_id, theme, language, notifications_enabled, email_notifications,
push_notifications, content_filter_level, auto_play_videos, data_saver_mode,
default_post_ttl, updated_at
) VALUES ($1::uuid, $2, $3, $4, $5, $6, $7, $8, $9, $10, NOW())
default_post_ttl, nsfw_enabled, updated_at
) VALUES ($1::uuid, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, NOW())
ON CONFLICT (user_id) DO UPDATE SET
theme = COALESCE(EXCLUDED.theme, user_settings.theme),
language = COALESCE(EXCLUDED.language, user_settings.language),
@ -781,12 +782,13 @@ func (r *UserRepository) UpdateUserSettings(ctx context.Context, us *models.User
auto_play_videos = COALESCE(EXCLUDED.auto_play_videos, user_settings.auto_play_videos),
data_saver_mode = COALESCE(EXCLUDED.data_saver_mode, user_settings.data_saver_mode),
default_post_ttl = COALESCE(EXCLUDED.default_post_ttl, user_settings.default_post_ttl),
nsfw_enabled = COALESCE(EXCLUDED.nsfw_enabled, user_settings.nsfw_enabled),
updated_at = NOW()
`
_, err := r.pool.Exec(ctx, query,
us.UserID, us.Theme, us.Language, us.NotificationsEnabled, us.EmailNotifications,
us.PushNotifications, us.ContentFilterLevel, us.AutoPlayVideos, us.DataSaverMode,
us.DefaultPostTtl,
us.DefaultPostTtl, us.NSFWEnabled,
)
return err
}

View file

@ -19,8 +19,8 @@ func NewFeedService(postRepo *repository.PostRepository, assetService *AssetServ
}
}
func (s *FeedService) GetFeed(ctx context.Context, userID string, categorySlug string, hasVideo bool, limit int, offset int) ([]models.Post, error) {
posts, err := s.postRepo.GetFeed(ctx, userID, categorySlug, hasVideo, limit, offset)
func (s *FeedService) GetFeed(ctx context.Context, userID string, categorySlug string, hasVideo bool, limit int, offset int, showNSFW bool) ([]models.Post, error) {
posts, err := s.postRepo.GetFeed(ctx, userID, categorySlug, hasVideo, limit, offset, showNSFW)
if err != nil {
return nil, err
}

View file

@ -217,6 +217,8 @@ func (s *OpenRouterService) ModerateVideo(ctx context.Context, frameURLs []strin
// ModerationResult is the parsed response from OpenRouter moderation
type ModerationResult struct {
Flagged bool `json:"flagged"`
Action string `json:"action"` // "clean", "nsfw", "flag"
NSFWReason string `json:"nsfw_reason"` // e.g. "violence", "nudity", "18+ content"
Reason string `json:"reason"`
Explanation string `json:"explanation"`
Hate float64 `json:"hate"`
@ -338,6 +340,8 @@ func parseModerationResponse(raw string) *ModerationResult {
var parsed struct {
Flagged bool `json:"flagged"`
Action string `json:"action"`
NSFWReason string `json:"nsfw_reason"`
Reason string `json:"reason"`
Explanation string `json:"explanation"`
Hate float64 `json:"hate"`
@ -350,7 +354,6 @@ func parseModerationResponse(raw string) *ModerationResult {
for _, candidate := range candidates {
if err := json.Unmarshal([]byte(candidate), &parsed); err == nil {
result.Flagged = parsed.Flagged
result.Reason = parsed.Reason
result.Explanation = parsed.Explanation
result.Hate = parsed.Hate
@ -359,17 +362,32 @@ func parseModerationResponse(raw string) *ModerationResult {
result.GreedDetail = parsed.GreedDetail
result.Delusion = parsed.Delusion
result.DelusionDetail = parsed.DelusionDetail
result.NSFWReason = parsed.NSFWReason
// Safety: re-derive flagged from scores — if any score > 0.5, it's flagged
// regardless of what the model put in the "flagged" field
scoreFlagged := parsed.Hate > 0.5 || parsed.Greed > 0.5 || parsed.Delusion > 0.5
if scoreFlagged != result.Flagged {
result.Flagged = scoreFlagged
if scoreFlagged && result.Reason == "" {
result.Reason = "Flagged: score exceeded 0.5 threshold"
// Use the action field if present, otherwise derive from scores
action := strings.ToLower(strings.TrimSpace(parsed.Action))
if action == "nsfw" || action == "flag" || action == "clean" {
result.Action = action
} else {
// Fallback: derive from scores
maxScore := max(parsed.Hate, max(parsed.Greed, parsed.Delusion))
if maxScore > 0.5 {
result.Action = "flag"
} else if maxScore > 0.25 {
result.Action = "nsfw"
} else {
result.Action = "clean"
}
if !scoreFlagged {
result.Reason = ""
}
result.Flagged = result.Action == "flag"
// Safety override: if any score > 0.7, always flag regardless of what model said
if parsed.Hate > 0.7 || parsed.Greed > 0.7 || parsed.Delusion > 0.7 {
result.Action = "flag"
result.Flagged = true
if result.Reason == "" {
result.Reason = "Flagged: score exceeded 0.7 threshold"
}
}
@ -383,19 +401,27 @@ func parseModerationResponse(raw string) *ModerationResult {
}
const defaultModerationSystemPrompt = `You are a content moderation AI for Sojorn, a social media platform.
Analyze the provided content for policy violations.
Analyze the provided content and decide one of three actions:
1. "clean" — Content is appropriate for all users. No issues.
2. "nsfw" — Content is NOT illegal or bannable, but is mature/sensitive. Examples: mild violence, suggestive (but not explicit) imagery, dark humor, intense themes, horror content, heated political speech, depictions of alcohol/smoking. This content will be blurred with a warning label so users who opted in can choose to view it.
3. "flag" — Content violates platform policy and should be reviewed by moderators. Examples: explicit nudity/pornography, graphic gore, illegal activity, credible threats, child exploitation, hard drug use instructions, doxxing, extreme hate speech.
When unsure, prefer "nsfw" over "flag" — only flag content you believe is clearly illegal or extremely graphic.
Respond ONLY with a JSON object in this exact format:
{
"action": "clean" or "nsfw" or "flag",
"nsfw_reason": "If action is nsfw, a short label users will see: e.g. 'Violence', 'Suggestive Content', '18+ Themes', 'Gore', 'Drug References'. Empty string if clean or flag.",
"flagged": true/false,
"reason": "one-line summary if flagged, empty string if clean",
"explanation": "Detailed paragraph explaining your full analysis. Describe exactly what you observed in the content, what specific elements you checked, why each category scored the way it did, and your overall reasoning for the flagged/clean decision.",
"reason": "one-line summary if flagged or nsfw, empty string if clean",
"explanation": "Detailed paragraph explaining your full analysis and why you chose this action.",
"hate": 0.0-1.0,
"hate_detail": "Explain exactly what you found (or didn't find) related to hate. E.g. 'No hate speech, slurs, threats, violence, sexual content, or discriminatory language detected.' or 'Contains racial slur targeting [group] in aggressive context.'",
"hate_detail": "What you found or didn't find related to hate/violence/sexual content.",
"greed": 0.0-1.0,
"greed_detail": "Explain exactly what you found (or didn't find) related to greed. E.g. 'No spam, scam language, or promotional manipulation detected.' or 'Contains crypto pump-and-dump language with fake earnings claims.'",
"greed_detail": "What you found or didn't find related to spam/scams/manipulation.",
"delusion": 0.0-1.0,
"delusion_detail": "Explain exactly what you found (or didn't find) related to delusion. E.g. 'No misinformation, self-harm, or conspiracy content detected.' or 'Promotes unverified medical cure with dangerous dosage advice.'"
"delusion_detail": "What you found or didn't find related to misinformation/self-harm."
}
Scoring guide (Three Poisons framework):
@ -403,6 +429,6 @@ Scoring guide (Three Poisons framework):
- greed: spam, scams, crypto schemes, misleading promotions, get-rich-quick, MLM recruitment
- delusion: misinformation, self-harm content, conspiracy theories, dangerous medical advice, deepfakes
Score 0.0 = no concern, 1.0 = extreme violation. Flag if any score > 0.5.
Score 0.0 = no concern, 1.0 = extreme violation.
ALWAYS provide detailed explanations even when content is clean — explain what you checked and why it passed.
Only respond with the JSON, no other text.`

View file

@ -88,6 +88,9 @@ class Post {
final String? ctaLink;
final String? ctaText;
final bool isNsfw;
final String? nsfwReason;
Post({
required this.id,
required this.authorId,
@ -135,6 +138,8 @@ class Post {
this.advertiserName,
this.ctaLink,
this.ctaText,
this.isNsfw = false,
this.nsfwReason,
});
static int? _parseInt(dynamic value) {
@ -276,6 +281,8 @@ class Post {
advertiserName: json['advertiser_name'] as String?,
ctaLink: json['advertiser_cta_link'] as String?,
ctaText: json['advertiser_cta_text'] as String?,
isNsfw: json['is_nsfw'] as bool? ?? false,
nsfwReason: json['nsfw_reason'] as String?,
);
}
@ -324,6 +331,8 @@ class Post {
'reactions': reactions,
'my_reactions': myReactions,
'reaction_users': reactionUsers,
'is_nsfw': isNsfw,
'nsfw_reason': nsfwReason,
};
}
}

View file

@ -9,6 +9,7 @@ class UserSettings {
final bool autoPlayVideos;
final bool dataSaverMode;
final int? defaultPostTtl;
final bool nsfwEnabled;
const UserSettings({
required this.userId,
@ -21,6 +22,7 @@ class UserSettings {
this.autoPlayVideos = true,
this.dataSaverMode = false,
this.defaultPostTtl,
this.nsfwEnabled = false,
});
factory UserSettings.fromJson(Map<String, dynamic> json) {
@ -35,6 +37,7 @@ class UserSettings {
autoPlayVideos: json['auto_play_videos'] as bool? ?? true,
dataSaverMode: json['data_saver_mode'] as bool? ?? false,
defaultPostTtl: _parseIntervalHours(json['default_post_ttl']),
nsfwEnabled: json['nsfw_enabled'] as bool? ?? false,
);
}
@ -50,6 +53,7 @@ class UserSettings {
'auto_play_videos': autoPlayVideos,
'data_saver_mode': dataSaverMode,
'default_post_ttl': defaultPostTtl,
'nsfw_enabled': nsfwEnabled,
};
}
@ -63,6 +67,7 @@ class UserSettings {
bool? autoPlayVideos,
bool? dataSaverMode,
int? defaultPostTtl,
bool? nsfwEnabled,
}) {
return UserSettings(
userId: userId,
@ -75,6 +80,7 @@ class UserSettings {
autoPlayVideos: autoPlayVideos ?? this.autoPlayVideos,
dataSaverMode: dataSaverMode ?? this.dataSaverMode,
defaultPostTtl: defaultPostTtl ?? this.defaultPostTtl,
nsfwEnabled: nsfwEnabled ?? this.nsfwEnabled,
);
}

View file

@ -154,6 +154,9 @@ class _ProfileSettingsScreenState extends ConsumerState<ProfileSettingsScreen> {
],
),
const SizedBox(height: AppTheme.spacingLg),
_buildNsfwSection(state),
const SizedBox(height: AppTheme.spacingLg * 2),
_buildLogoutButton(),
@ -401,6 +404,50 @@ class _ProfileSettingsScreenState extends ConsumerState<ProfileSettingsScreen> {
);
}
Widget _buildNsfwSection(dynamic state) {
final userSettings = state.user;
if (userSettings == null) return const SizedBox.shrink();
return Container(
decoration: BoxDecoration(
color: AppTheme.cardSurface,
borderRadius: BorderRadius.circular(20),
border: Border.all(color: AppTheme.navyBlue.withValues(alpha: 0.15)),
),
padding: const EdgeInsets.all(20),
child: Column(
crossAxisAlignment: CrossAxisAlignment.start,
children: [
Row(
children: [
Icon(Icons.visibility_off_outlined, size: 20, color: Colors.amber.shade700),
const SizedBox(width: 8),
Text('Content Filters', style: AppTheme.textTheme.headlineSmall),
],
),
const SizedBox(height: 4),
Text(
'Control what content appears in your feed',
style: AppTheme.textTheme.labelSmall?.copyWith(color: Colors.grey),
),
const SizedBox(height: 16),
SwitchListTile(
contentPadding: EdgeInsets.zero,
title: const Text('Show Sensitive Content (NSFW)'),
subtitle: const Text(
'Enable to see posts marked as sensitive (violence, mature themes, etc). Disabled by default.',
),
value: userSettings.nsfwEnabled,
activeColor: Colors.amber.shade700,
onChanged: (v) => ref.read(settingsProvider.notifier).updateUser(
userSettings.copyWith(nsfwEnabled: v),
),
),
],
),
);
}
void _showPrivacyEditor() {
final state = ref.read(settingsProvider);
final privacy = state.privacy;

View file

@ -1,3 +1,4 @@
import 'dart:ui';
import 'package:flutter/material.dart';
import '../models/post.dart';
@ -34,7 +35,7 @@ import '../theme/sojorn_feed_palette.dart';
/// - Single source of truth for layout margins, padding, and elevation
/// - Pure stateless composition of sub-components
/// - ViewMode-driven visual variations without code duplication
class sojornPostCard extends StatelessWidget {
class sojornPostCard extends StatefulWidget {
final Post post;
final PostViewMode mode;
final VoidCallback? onTap;
@ -56,6 +57,22 @@ class sojornPostCard extends StatelessWidget {
this.showChainContext = true,
});
@override
State<sojornPostCard> createState() => _sojornPostCardState();
}
class _sojornPostCardState extends State<sojornPostCard> {
bool _nsfwRevealed = false;
Post get post => widget.post;
PostViewMode get mode => widget.mode;
VoidCallback? get onTap => widget.onTap;
VoidCallback? get onChain => widget.onChain;
VoidCallback? get onPostChanged => widget.onPostChanged;
VoidCallback? get onChainParentTap => widget.onChainParentTap;
bool get isThreadView => widget.isThreadView;
bool get showChainContext => widget.showChainContext;
/// Get spacing values based on view mode
EdgeInsets get _padding {
switch (mode) {
@ -178,20 +195,43 @@ class sojornPostCard extends StatelessWidget {
const SizedBox(height: 16),
// Body text - clickable for post detail with full background coverage
InkWell(
onTap: onTap,
borderRadius: BorderRadius.circular(AppTheme.radiusMd),
child: Container(
width: double.infinity,
padding: const EdgeInsets.symmetric(vertical: 4),
child: PostBody(
text: post.body,
bodyFormat: post.bodyFormat,
backgroundId: post.backgroundId,
mode: mode,
if (post.isNsfw && !_nsfwRevealed) ...[
// NSFW blurred body
ClipRect(
child: Stack(
children: [
ImageFiltered(
imageFilter: ImageFilter.blur(sigmaX: 12, sigmaY: 12),
child: Container(
width: double.infinity,
padding: const EdgeInsets.symmetric(vertical: 4),
child: PostBody(
text: post.body,
bodyFormat: post.bodyFormat,
backgroundId: post.backgroundId,
mode: mode,
),
),
),
],
),
),
),
] else ...[
InkWell(
onTap: onTap,
borderRadius: BorderRadius.circular(AppTheme.radiusMd),
child: Container(
width: double.infinity,
padding: const EdgeInsets.symmetric(vertical: 4),
child: PostBody(
text: post.body,
bodyFormat: post.bodyFormat,
backgroundId: post.backgroundId,
mode: mode,
),
),
),
],
],
),
),
@ -201,10 +241,78 @@ class sojornPostCard extends StatelessWidget {
(post.thumbnailUrl != null && post.thumbnailUrl!.isNotEmpty) ||
(post.videoUrl != null && post.videoUrl!.isNotEmpty)) ...[
const SizedBox(height: 12),
PostMedia(
post: post,
mode: mode,
onTap: onTap,
if (post.isNsfw && !_nsfwRevealed) ...[
ClipRect(
child: ImageFiltered(
imageFilter: ImageFilter.blur(sigmaX: 20, sigmaY: 20),
child: PostMedia(
post: post,
mode: mode,
onTap: null,
),
),
),
] else ...[
PostMedia(
post: post,
mode: mode,
onTap: onTap,
),
],
],
// NSFW warning banner with tap-to-reveal
if (post.isNsfw && !_nsfwRevealed) ...[
GestureDetector(
onTap: () => setState(() => _nsfwRevealed = true),
child: Container(
width: double.infinity,
margin: EdgeInsets.symmetric(horizontal: _padding.left, vertical: 8),
padding: const EdgeInsets.symmetric(vertical: 12, horizontal: 16),
decoration: BoxDecoration(
color: Colors.amber.shade800.withOpacity(0.15),
borderRadius: BorderRadius.circular(12),
border: Border.all(color: Colors.amber.shade700.withOpacity(0.3)),
),
child: Column(
children: [
Row(
mainAxisAlignment: MainAxisAlignment.center,
children: [
Icon(Icons.visibility_off, size: 16, color: Colors.amber.shade700),
const SizedBox(width: 6),
Text(
'Sensitive Content',
style: TextStyle(
fontWeight: FontWeight.w700,
fontSize: 13,
color: Colors.amber.shade700,
),
),
],
),
if (post.nsfwReason != null && post.nsfwReason!.isNotEmpty) ...[
const SizedBox(height: 4),
Text(
post.nsfwReason!,
style: TextStyle(
fontSize: 11,
color: Colors.amber.shade600,
),
),
],
const SizedBox(height: 6),
Text(
'Tap to reveal',
style: TextStyle(
fontSize: 11,
fontWeight: FontWeight.w500,
color: Colors.amber.shade600.withOpacity(0.8),
),
),
],
),
),
),
],