diff --git a/admin/src/app/ai-moderation/page.tsx b/admin/src/app/ai-moderation/page.tsx
index f56655d..d7b5acf 100644
--- a/admin/src/app/ai-moderation/page.tsx
+++ b/admin/src/app/ai-moderation/page.tsx
@@ -364,16 +364,19 @@ function ConfigEditor({ moderationType, config, onSaved }: {
{!modelId &&
Select and save a model first to test
}
{testResult && (
-
+
{testResult.error ? (
{testResult.error}
) : (
{/* Verdict */}
-
-
- {testResult.flagged ? '⛔ FLAGGED' : '✅ CLEAN'}
+
+
+ {(testResult.action === 'flag' || (!testResult.action && testResult.flagged)) ? '⛔ FLAGGED' : testResult.action === 'nsfw' ? '⚠️ NSFW' : '✅ CLEAN'}
+ {testResult.nsfw_reason && (
+ {testResult.nsfw_reason}
+ )}
{testResult.reason && — {testResult.reason}}
diff --git a/go-backend/cmd/api/main.go b/go-backend/cmd/api/main.go
index 3bc02d5..370ccf1 100644
--- a/go-backend/cmd/api/main.go
+++ b/go-backend/cmd/api/main.go
@@ -131,7 +131,7 @@ func main() {
wsHandler := handlers.NewWSHandler(hub, cfg.JWTSecret)
userHandler := handlers.NewUserHandler(userRepo, postRepo, notificationService, assetService)
- postHandler := handlers.NewPostHandler(postRepo, userRepo, feedService, assetService, notificationService, moderationService, contentFilter)
+ postHandler := handlers.NewPostHandler(postRepo, userRepo, feedService, assetService, notificationService, moderationService, contentFilter, openRouterService)
chatHandler := handlers.NewChatHandler(chatRepo, notificationService, hub)
authHandler := handlers.NewAuthHandler(userRepo, cfg, emailService, sendPulseService)
categoryHandler := handlers.NewCategoryHandler(categoryRepo)
diff --git a/go-backend/internal/handlers/post_handler.go b/go-backend/internal/handlers/post_handler.go
index c38ad04..968e369 100644
--- a/go-backend/internal/handlers/post_handler.go
+++ b/go-backend/internal/handlers/post_handler.go
@@ -23,9 +23,10 @@ type PostHandler struct {
notificationService *services.NotificationService
moderationService *services.ModerationService
contentFilter *services.ContentFilter
+ openRouterService *services.OpenRouterService
}
-func NewPostHandler(postRepo *repository.PostRepository, userRepo *repository.UserRepository, feedService *services.FeedService, assetService *services.AssetService, notificationService *services.NotificationService, moderationService *services.ModerationService, contentFilter *services.ContentFilter) *PostHandler {
+func NewPostHandler(postRepo *repository.PostRepository, userRepo *repository.UserRepository, feedService *services.FeedService, assetService *services.AssetService, notificationService *services.NotificationService, moderationService *services.ModerationService, contentFilter *services.ContentFilter, openRouterService *services.OpenRouterService) *PostHandler {
return &PostHandler{
postRepo: postRepo,
userRepo: userRepo,
@@ -34,6 +35,7 @@ func NewPostHandler(postRepo *repository.PostRepository, userRepo *repository.Us
notificationService: notificationService,
moderationService: moderationService,
contentFilter: contentFilter,
+ openRouterService: openRouterService,
}
}
@@ -324,6 +326,28 @@ func (h *PostHandler) CreatePost(c *gin.Context) {
}
}
+ // 5b. OpenRouter AI Moderation — NSFW vs Flag decision
+ if h.openRouterService != nil {
+ orResult, orErr := h.openRouterService.ModerateText(c.Request.Context(), req.Body)
+ if orErr == nil && orResult != nil {
+ switch orResult.Action {
+ case "nsfw":
+ post.IsNSFW = true
+ post.NSFWReason = orResult.NSFWReason
+ if post.Status != "pending_moderation" {
+ post.Status = "active" // NSFW posts are active but blurred
+ }
+ case "flag":
+ post.Status = "pending_moderation"
+ }
+ // Update CIS from OpenRouter scores if available
+ if orResult.Hate > 0 || orResult.Greed > 0 || orResult.Delusion > 0 {
+ orCis := 1.0 - (orResult.Hate+orResult.Greed+orResult.Delusion)/3.0
+ post.CISScore = &orCis
+ }
+ }
+ }
+
// Create post
err = h.postRepo.CreatePost(c.Request.Context(), post)
if err != nil {
@@ -375,7 +399,13 @@ func (h *PostHandler) GetFeed(c *gin.Context) {
category := c.Query("category")
hasVideo := c.Query("has_video") == "true"
- posts, err := h.feedService.GetFeed(c.Request.Context(), userIDStr.(string), category, hasVideo, limit, offset)
+ // Check user's NSFW preference
+ showNSFW := false
+ if settings, err := h.userRepo.GetUserSettings(c.Request.Context(), userIDStr.(string)); err == nil && settings.NSFWEnabled != nil {
+ showNSFW = *settings.NSFWEnabled
+ }
+
+ posts, err := h.feedService.GetFeed(c.Request.Context(), userIDStr.(string), category, hasVideo, limit, offset, showNSFW)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to fetch feed", "details": err.Error()})
return
diff --git a/go-backend/internal/models/post.go b/go-backend/internal/models/post.go
index 000713a..f552d8b 100644
--- a/go-backend/internal/models/post.go
+++ b/go-backend/internal/models/post.go
@@ -31,6 +31,8 @@ type Post struct {
AllowChain bool `json:"allow_chain" db:"allow_chain"`
ChainParentID *uuid.UUID `json:"chain_parent_id" db:"chain_parent_id"`
Visibility string `json:"visibility" db:"visibility"`
+ IsNSFW bool `json:"is_nsfw" db:"is_nsfw"`
+ NSFWReason string `json:"nsfw_reason" db:"nsfw_reason"`
ExpiresAt *time.Time `json:"expires_at" db:"expires_at"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
EditedAt *time.Time `json:"edited_at,omitempty" db:"edited_at"`
diff --git a/go-backend/internal/models/settings.go b/go-backend/internal/models/settings.go
index ccf7dc2..921f473 100644
--- a/go-backend/internal/models/settings.go
+++ b/go-backend/internal/models/settings.go
@@ -30,5 +30,6 @@ type UserSettings struct {
AutoPlayVideos *bool `json:"auto_play_videos" db:"auto_play_videos"`
DataSaverMode *bool `json:"data_saver_mode" db:"data_saver_mode"`
DefaultPostTtl *int `json:"default_post_ttl" db:"default_post_ttl"`
+ NSFWEnabled *bool `json:"nsfw_enabled" db:"nsfw_enabled"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
diff --git a/go-backend/internal/repository/post_repository.go b/go-backend/internal/repository/post_repository.go
index fb4252c..4ca9136 100644
--- a/go-backend/internal/repository/post_repository.go
+++ b/go-backend/internal/repository/post_repository.go
@@ -37,14 +37,16 @@ func (r *PostRepository) CreatePost(ctx context.Context, post *models.Post) erro
author_id, category_id, body, status, tone_label, cis_score,
image_url, video_url, thumbnail_url, duration_ms, body_format, background_id, tags,
is_beacon, beacon_type, location, confidence_score,
- is_active_beacon, allow_chain, chain_parent_id, visibility, expires_at
+ is_active_beacon, allow_chain, chain_parent_id, visibility, expires_at,
+ is_nsfw, nsfw_reason
) VALUES (
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13,
$14, $15,
CASE WHEN ($16::double precision) IS NOT NULL AND ($17::double precision) IS NOT NULL
THEN ST_SetSRID(ST_MakePoint(($17::double precision), ($16::double precision)), 4326)::geography
ELSE NULL END,
- $18, $19, $20, $21, $22, $23
+ $18, $19, $20, $21, $22, $23,
+ $24, $25
) RETURNING id, created_at
`
@@ -59,6 +61,7 @@ func (r *PostRepository) CreatePost(ctx context.Context, post *models.Post) erro
post.ImageURL, post.VideoURL, post.ThumbnailURL, post.DurationMS, post.BodyFormat, post.BackgroundID, post.Tags,
post.IsBeacon, post.BeaconType, post.Lat, post.Long, post.Confidence,
post.IsActiveBeacon, post.AllowChain, post.ChainParentID, post.Visibility, post.ExpiresAt,
+ post.IsNSFW, post.NSFWReason,
).Scan(&post.ID, &post.CreatedAt)
if err != nil {
@@ -120,7 +123,7 @@ func (r *PostRepository) GetRandomSponsoredPost(ctx context.Context, userID stri
return &p, nil
}
-func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlug string, hasVideo bool, limit int, offset int) ([]models.Post, error) {
+func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlug string, hasVideo bool, limit int, offset int, showNSFW bool) ([]models.Post, error) {
query := `
SELECT
p.id, p.author_id, p.category_id, p.body,
@@ -139,7 +142,9 @@ func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlu
CASE WHEN ($4::text) != '' THEN EXISTS(SELECT 1 FROM public.post_likes WHERE post_id = p.id AND user_id = $4::text::uuid) ELSE FALSE END as is_liked,
p.allow_chain, p.visibility,
COALESCE((SELECT jsonb_object_agg(emoji, count) FROM (SELECT emoji, COUNT(*) as count FROM public.post_reactions WHERE post_id = p.id GROUP BY emoji) r), '{}'::jsonb) as reaction_counts,
- CASE WHEN ($4::text) != '' THEN COALESCE((SELECT jsonb_agg(emoji) FROM public.post_reactions WHERE post_id = p.id AND user_id = $4::text::uuid), '[]'::jsonb) ELSE '[]'::jsonb END as my_reactions
+ CASE WHEN ($4::text) != '' THEN COALESCE((SELECT jsonb_agg(emoji) FROM public.post_reactions WHERE post_id = p.id AND user_id = $4::text::uuid), '[]'::jsonb) ELSE '[]'::jsonb END as my_reactions,
+ COALESCE(p.is_nsfw, FALSE) as is_nsfw,
+ COALESCE(p.nsfw_reason, '') as nsfw_reason
FROM public.posts p
JOIN public.profiles pr ON p.author_id = pr.id
LEFT JOIN public.post_metrics m ON p.id = m.post_id
@@ -156,10 +161,11 @@ func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlu
AND NOT public.has_block_between(p.author_id, CASE WHEN $4::text != '' THEN $4::text::uuid ELSE NULL END)
AND ($3 = FALSE OR (COALESCE(p.video_url, '') <> '' OR (COALESCE(p.image_url, '') ILIKE '%.mp4')))
AND ($5 = '' OR c.slug = $5)
+ AND ($6 = TRUE OR COALESCE(p.is_nsfw, FALSE) = FALSE)
ORDER BY p.created_at DESC
LIMIT $1 OFFSET $2
`
- rows, err := r.pool.Query(ctx, query, limit, offset, hasVideo, userID, categorySlug)
+ rows, err := r.pool.Query(ctx, query, limit, offset, hasVideo, userID, categorySlug, showNSFW)
if err != nil {
return nil, err
}
@@ -173,6 +179,7 @@ func (r *PostRepository) GetFeed(ctx context.Context, userID string, categorySlu
&p.AuthorHandle, &p.AuthorDisplayName, &p.AuthorAvatarURL,
&p.LikeCount, &p.CommentCount, &p.IsLiked,
&p.AllowChain, &p.Visibility, &p.Reactions, &p.MyReactions,
+ &p.IsNSFW, &p.NSFWReason,
)
if err != nil {
return nil, err
diff --git a/go-backend/internal/repository/user_repository.go b/go-backend/internal/repository/user_repository.go
index 920ad42..f99a9e4 100644
--- a/go-backend/internal/repository/user_repository.go
+++ b/go-backend/internal/repository/user_repository.go
@@ -727,7 +727,7 @@ func (r *UserRepository) GetUserSettings(ctx context.Context, userID string) (*m
query := `
SELECT user_id, theme, language, notifications_enabled, email_notifications,
push_notifications, content_filter_level, auto_play_videos, data_saver_mode,
- default_post_ttl, updated_at
+ default_post_ttl, COALESCE(nsfw_enabled, FALSE), updated_at
FROM public.user_settings
WHERE user_id = $1::uuid
`
@@ -735,7 +735,7 @@ func (r *UserRepository) GetUserSettings(ctx context.Context, userID string) (*m
err := r.pool.QueryRow(ctx, query, userID).Scan(
&us.UserID, &us.Theme, &us.Language, &us.NotificationsEnabled, &us.EmailNotifications,
&us.PushNotifications, &us.ContentFilterLevel, &us.AutoPlayVideos, &us.DataSaverMode,
- &us.DefaultPostTtl, &us.UpdatedAt,
+ &us.DefaultPostTtl, &us.NSFWEnabled, &us.UpdatedAt,
)
if err != nil {
if err.Error() == "no rows in result set" || err.Error() == "pgx: no rows in result set" {
@@ -756,6 +756,7 @@ func (r *UserRepository) GetUserSettings(ctx context.Context, userID string) (*m
ContentFilterLevel: &med,
AutoPlayVideos: &t,
DataSaverMode: &f,
+ NSFWEnabled: &f,
UpdatedAt: time.Now(),
}, nil
}
@@ -769,8 +770,8 @@ func (r *UserRepository) UpdateUserSettings(ctx context.Context, us *models.User
INSERT INTO public.user_settings (
user_id, theme, language, notifications_enabled, email_notifications,
push_notifications, content_filter_level, auto_play_videos, data_saver_mode,
- default_post_ttl, updated_at
- ) VALUES ($1::uuid, $2, $3, $4, $5, $6, $7, $8, $9, $10, NOW())
+ default_post_ttl, nsfw_enabled, updated_at
+ ) VALUES ($1::uuid, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, NOW())
ON CONFLICT (user_id) DO UPDATE SET
theme = COALESCE(EXCLUDED.theme, user_settings.theme),
language = COALESCE(EXCLUDED.language, user_settings.language),
@@ -781,12 +782,13 @@ func (r *UserRepository) UpdateUserSettings(ctx context.Context, us *models.User
auto_play_videos = COALESCE(EXCLUDED.auto_play_videos, user_settings.auto_play_videos),
data_saver_mode = COALESCE(EXCLUDED.data_saver_mode, user_settings.data_saver_mode),
default_post_ttl = COALESCE(EXCLUDED.default_post_ttl, user_settings.default_post_ttl),
+ nsfw_enabled = COALESCE(EXCLUDED.nsfw_enabled, user_settings.nsfw_enabled),
updated_at = NOW()
`
_, err := r.pool.Exec(ctx, query,
us.UserID, us.Theme, us.Language, us.NotificationsEnabled, us.EmailNotifications,
us.PushNotifications, us.ContentFilterLevel, us.AutoPlayVideos, us.DataSaverMode,
- us.DefaultPostTtl,
+ us.DefaultPostTtl, us.NSFWEnabled,
)
return err
}
diff --git a/go-backend/internal/services/feed_service.go b/go-backend/internal/services/feed_service.go
index 51ec725..dbe9ec8 100644
--- a/go-backend/internal/services/feed_service.go
+++ b/go-backend/internal/services/feed_service.go
@@ -19,8 +19,8 @@ func NewFeedService(postRepo *repository.PostRepository, assetService *AssetServ
}
}
-func (s *FeedService) GetFeed(ctx context.Context, userID string, categorySlug string, hasVideo bool, limit int, offset int) ([]models.Post, error) {
- posts, err := s.postRepo.GetFeed(ctx, userID, categorySlug, hasVideo, limit, offset)
+func (s *FeedService) GetFeed(ctx context.Context, userID string, categorySlug string, hasVideo bool, limit int, offset int, showNSFW bool) ([]models.Post, error) {
+ posts, err := s.postRepo.GetFeed(ctx, userID, categorySlug, hasVideo, limit, offset, showNSFW)
if err != nil {
return nil, err
}
diff --git a/go-backend/internal/services/openrouter_service.go b/go-backend/internal/services/openrouter_service.go
index c430b3a..eddc756 100644
--- a/go-backend/internal/services/openrouter_service.go
+++ b/go-backend/internal/services/openrouter_service.go
@@ -217,6 +217,8 @@ func (s *OpenRouterService) ModerateVideo(ctx context.Context, frameURLs []strin
// ModerationResult is the parsed response from OpenRouter moderation
type ModerationResult struct {
Flagged bool `json:"flagged"`
+ Action string `json:"action"` // "clean", "nsfw", "flag"
+ NSFWReason string `json:"nsfw_reason"` // e.g. "violence", "nudity", "18+ content"
Reason string `json:"reason"`
Explanation string `json:"explanation"`
Hate float64 `json:"hate"`
@@ -338,6 +340,8 @@ func parseModerationResponse(raw string) *ModerationResult {
var parsed struct {
Flagged bool `json:"flagged"`
+ Action string `json:"action"`
+ NSFWReason string `json:"nsfw_reason"`
Reason string `json:"reason"`
Explanation string `json:"explanation"`
Hate float64 `json:"hate"`
@@ -350,7 +354,6 @@ func parseModerationResponse(raw string) *ModerationResult {
for _, candidate := range candidates {
if err := json.Unmarshal([]byte(candidate), &parsed); err == nil {
- result.Flagged = parsed.Flagged
result.Reason = parsed.Reason
result.Explanation = parsed.Explanation
result.Hate = parsed.Hate
@@ -359,17 +362,32 @@ func parseModerationResponse(raw string) *ModerationResult {
result.GreedDetail = parsed.GreedDetail
result.Delusion = parsed.Delusion
result.DelusionDetail = parsed.DelusionDetail
+ result.NSFWReason = parsed.NSFWReason
- // Safety: re-derive flagged from scores — if any score > 0.5, it's flagged
- // regardless of what the model put in the "flagged" field
- scoreFlagged := parsed.Hate > 0.5 || parsed.Greed > 0.5 || parsed.Delusion > 0.5
- if scoreFlagged != result.Flagged {
- result.Flagged = scoreFlagged
- if scoreFlagged && result.Reason == "" {
- result.Reason = "Flagged: score exceeded 0.5 threshold"
+ // Use the action field if present, otherwise derive from scores
+ action := strings.ToLower(strings.TrimSpace(parsed.Action))
+ if action == "nsfw" || action == "flag" || action == "clean" {
+ result.Action = action
+ } else {
+ // Fallback: derive from scores
+ maxScore := max(parsed.Hate, max(parsed.Greed, parsed.Delusion))
+ if maxScore > 0.5 {
+ result.Action = "flag"
+ } else if maxScore > 0.25 {
+ result.Action = "nsfw"
+ } else {
+ result.Action = "clean"
}
- if !scoreFlagged {
- result.Reason = ""
+ }
+
+ result.Flagged = result.Action == "flag"
+
+ // Safety override: if any score > 0.7, always flag regardless of what model said
+ if parsed.Hate > 0.7 || parsed.Greed > 0.7 || parsed.Delusion > 0.7 {
+ result.Action = "flag"
+ result.Flagged = true
+ if result.Reason == "" {
+ result.Reason = "Flagged: score exceeded 0.7 threshold"
}
}
@@ -383,19 +401,27 @@ func parseModerationResponse(raw string) *ModerationResult {
}
const defaultModerationSystemPrompt = `You are a content moderation AI for Sojorn, a social media platform.
-Analyze the provided content for policy violations.
+Analyze the provided content and decide one of three actions:
+
+1. "clean" — Content is appropriate for all users. No issues.
+2. "nsfw" — Content is NOT illegal or bannable, but is mature/sensitive. Examples: mild violence, suggestive (but not explicit) imagery, dark humor, intense themes, horror content, heated political speech, depictions of alcohol/smoking. This content will be blurred with a warning label so users who opted in can choose to view it.
+3. "flag" — Content violates platform policy and should be reviewed by moderators. Examples: explicit nudity/pornography, graphic gore, illegal activity, credible threats, child exploitation, hard drug use instructions, doxxing, extreme hate speech.
+
+When unsure, prefer "nsfw" over "flag" — only flag content you believe is clearly illegal or extremely graphic.
Respond ONLY with a JSON object in this exact format:
{
+ "action": "clean" or "nsfw" or "flag",
+ "nsfw_reason": "If action is nsfw, a short label users will see: e.g. 'Violence', 'Suggestive Content', '18+ Themes', 'Gore', 'Drug References'. Empty string if clean or flag.",
"flagged": true/false,
- "reason": "one-line summary if flagged, empty string if clean",
- "explanation": "Detailed paragraph explaining your full analysis. Describe exactly what you observed in the content, what specific elements you checked, why each category scored the way it did, and your overall reasoning for the flagged/clean decision.",
+ "reason": "one-line summary if flagged or nsfw, empty string if clean",
+ "explanation": "Detailed paragraph explaining your full analysis and why you chose this action.",
"hate": 0.0-1.0,
- "hate_detail": "Explain exactly what you found (or didn't find) related to hate. E.g. 'No hate speech, slurs, threats, violence, sexual content, or discriminatory language detected.' or 'Contains racial slur targeting [group] in aggressive context.'",
+ "hate_detail": "What you found or didn't find related to hate/violence/sexual content.",
"greed": 0.0-1.0,
- "greed_detail": "Explain exactly what you found (or didn't find) related to greed. E.g. 'No spam, scam language, or promotional manipulation detected.' or 'Contains crypto pump-and-dump language with fake earnings claims.'",
+ "greed_detail": "What you found or didn't find related to spam/scams/manipulation.",
"delusion": 0.0-1.0,
- "delusion_detail": "Explain exactly what you found (or didn't find) related to delusion. E.g. 'No misinformation, self-harm, or conspiracy content detected.' or 'Promotes unverified medical cure with dangerous dosage advice.'"
+ "delusion_detail": "What you found or didn't find related to misinformation/self-harm."
}
Scoring guide (Three Poisons framework):
@@ -403,6 +429,6 @@ Scoring guide (Three Poisons framework):
- greed: spam, scams, crypto schemes, misleading promotions, get-rich-quick, MLM recruitment
- delusion: misinformation, self-harm content, conspiracy theories, dangerous medical advice, deepfakes
-Score 0.0 = no concern, 1.0 = extreme violation. Flag if any score > 0.5.
+Score 0.0 = no concern, 1.0 = extreme violation.
ALWAYS provide detailed explanations even when content is clean — explain what you checked and why it passed.
Only respond with the JSON, no other text.`
diff --git a/sojorn_app/lib/models/post.dart b/sojorn_app/lib/models/post.dart
index 8a54c22..d1b9ea9 100644
--- a/sojorn_app/lib/models/post.dart
+++ b/sojorn_app/lib/models/post.dart
@@ -88,6 +88,9 @@ class Post {
final String? ctaLink;
final String? ctaText;
+ final bool isNsfw;
+ final String? nsfwReason;
+
Post({
required this.id,
required this.authorId,
@@ -135,6 +138,8 @@ class Post {
this.advertiserName,
this.ctaLink,
this.ctaText,
+ this.isNsfw = false,
+ this.nsfwReason,
});
static int? _parseInt(dynamic value) {
@@ -276,6 +281,8 @@ class Post {
advertiserName: json['advertiser_name'] as String?,
ctaLink: json['advertiser_cta_link'] as String?,
ctaText: json['advertiser_cta_text'] as String?,
+ isNsfw: json['is_nsfw'] as bool? ?? false,
+ nsfwReason: json['nsfw_reason'] as String?,
);
}
@@ -324,6 +331,8 @@ class Post {
'reactions': reactions,
'my_reactions': myReactions,
'reaction_users': reactionUsers,
+ 'is_nsfw': isNsfw,
+ 'nsfw_reason': nsfwReason,
};
}
}
diff --git a/sojorn_app/lib/models/user_settings.dart b/sojorn_app/lib/models/user_settings.dart
index 5ed95d6..ed0610a 100644
--- a/sojorn_app/lib/models/user_settings.dart
+++ b/sojorn_app/lib/models/user_settings.dart
@@ -9,6 +9,7 @@ class UserSettings {
final bool autoPlayVideos;
final bool dataSaverMode;
final int? defaultPostTtl;
+ final bool nsfwEnabled;
const UserSettings({
required this.userId,
@@ -21,6 +22,7 @@ class UserSettings {
this.autoPlayVideos = true,
this.dataSaverMode = false,
this.defaultPostTtl,
+ this.nsfwEnabled = false,
});
factory UserSettings.fromJson(Map json) {
@@ -35,6 +37,7 @@ class UserSettings {
autoPlayVideos: json['auto_play_videos'] as bool? ?? true,
dataSaverMode: json['data_saver_mode'] as bool? ?? false,
defaultPostTtl: _parseIntervalHours(json['default_post_ttl']),
+ nsfwEnabled: json['nsfw_enabled'] as bool? ?? false,
);
}
@@ -50,6 +53,7 @@ class UserSettings {
'auto_play_videos': autoPlayVideos,
'data_saver_mode': dataSaverMode,
'default_post_ttl': defaultPostTtl,
+ 'nsfw_enabled': nsfwEnabled,
};
}
@@ -63,6 +67,7 @@ class UserSettings {
bool? autoPlayVideos,
bool? dataSaverMode,
int? defaultPostTtl,
+ bool? nsfwEnabled,
}) {
return UserSettings(
userId: userId,
@@ -75,6 +80,7 @@ class UserSettings {
autoPlayVideos: autoPlayVideos ?? this.autoPlayVideos,
dataSaverMode: dataSaverMode ?? this.dataSaverMode,
defaultPostTtl: defaultPostTtl ?? this.defaultPostTtl,
+ nsfwEnabled: nsfwEnabled ?? this.nsfwEnabled,
);
}
diff --git a/sojorn_app/lib/screens/profile/profile_settings_screen.dart b/sojorn_app/lib/screens/profile/profile_settings_screen.dart
index 553f78f..7096a44 100644
--- a/sojorn_app/lib/screens/profile/profile_settings_screen.dart
+++ b/sojorn_app/lib/screens/profile/profile_settings_screen.dart
@@ -154,6 +154,9 @@ class _ProfileSettingsScreenState extends ConsumerState {
],
),
+ const SizedBox(height: AppTheme.spacingLg),
+ _buildNsfwSection(state),
+
const SizedBox(height: AppTheme.spacingLg * 2),
_buildLogoutButton(),
@@ -401,6 +404,50 @@ class _ProfileSettingsScreenState extends ConsumerState {
);
}
+ Widget _buildNsfwSection(dynamic state) {
+ final userSettings = state.user;
+ if (userSettings == null) return const SizedBox.shrink();
+
+ return Container(
+ decoration: BoxDecoration(
+ color: AppTheme.cardSurface,
+ borderRadius: BorderRadius.circular(20),
+ border: Border.all(color: AppTheme.navyBlue.withValues(alpha: 0.15)),
+ ),
+ padding: const EdgeInsets.all(20),
+ child: Column(
+ crossAxisAlignment: CrossAxisAlignment.start,
+ children: [
+ Row(
+ children: [
+ Icon(Icons.visibility_off_outlined, size: 20, color: Colors.amber.shade700),
+ const SizedBox(width: 8),
+ Text('Content Filters', style: AppTheme.textTheme.headlineSmall),
+ ],
+ ),
+ const SizedBox(height: 4),
+ Text(
+ 'Control what content appears in your feed',
+ style: AppTheme.textTheme.labelSmall?.copyWith(color: Colors.grey),
+ ),
+ const SizedBox(height: 16),
+ SwitchListTile(
+ contentPadding: EdgeInsets.zero,
+ title: const Text('Show Sensitive Content (NSFW)'),
+ subtitle: const Text(
+ 'Enable to see posts marked as sensitive (violence, mature themes, etc). Disabled by default.',
+ ),
+ value: userSettings.nsfwEnabled,
+ activeColor: Colors.amber.shade700,
+ onChanged: (v) => ref.read(settingsProvider.notifier).updateUser(
+ userSettings.copyWith(nsfwEnabled: v),
+ ),
+ ),
+ ],
+ ),
+ );
+ }
+
void _showPrivacyEditor() {
final state = ref.read(settingsProvider);
final privacy = state.privacy;
diff --git a/sojorn_app/lib/widgets/sojorn_post_card.dart b/sojorn_app/lib/widgets/sojorn_post_card.dart
index f376353..a29c31b 100644
--- a/sojorn_app/lib/widgets/sojorn_post_card.dart
+++ b/sojorn_app/lib/widgets/sojorn_post_card.dart
@@ -1,3 +1,4 @@
+import 'dart:ui';
import 'package:flutter/material.dart';
import '../models/post.dart';
@@ -34,7 +35,7 @@ import '../theme/sojorn_feed_palette.dart';
/// - Single source of truth for layout margins, padding, and elevation
/// - Pure stateless composition of sub-components
/// - ViewMode-driven visual variations without code duplication
-class sojornPostCard extends StatelessWidget {
+class sojornPostCard extends StatefulWidget {
final Post post;
final PostViewMode mode;
final VoidCallback? onTap;
@@ -56,6 +57,22 @@ class sojornPostCard extends StatelessWidget {
this.showChainContext = true,
});
+ @override
+ State createState() => _sojornPostCardState();
+}
+
+class _sojornPostCardState extends State {
+ bool _nsfwRevealed = false;
+
+ Post get post => widget.post;
+ PostViewMode get mode => widget.mode;
+ VoidCallback? get onTap => widget.onTap;
+ VoidCallback? get onChain => widget.onChain;
+ VoidCallback? get onPostChanged => widget.onPostChanged;
+ VoidCallback? get onChainParentTap => widget.onChainParentTap;
+ bool get isThreadView => widget.isThreadView;
+ bool get showChainContext => widget.showChainContext;
+
/// Get spacing values based on view mode
EdgeInsets get _padding {
switch (mode) {
@@ -178,20 +195,43 @@ class sojornPostCard extends StatelessWidget {
const SizedBox(height: 16),
// Body text - clickable for post detail with full background coverage
- InkWell(
- onTap: onTap,
- borderRadius: BorderRadius.circular(AppTheme.radiusMd),
- child: Container(
- width: double.infinity,
- padding: const EdgeInsets.symmetric(vertical: 4),
- child: PostBody(
- text: post.body,
- bodyFormat: post.bodyFormat,
- backgroundId: post.backgroundId,
- mode: mode,
+ if (post.isNsfw && !_nsfwRevealed) ...[
+ // NSFW blurred body
+ ClipRect(
+ child: Stack(
+ children: [
+ ImageFiltered(
+ imageFilter: ImageFilter.blur(sigmaX: 12, sigmaY: 12),
+ child: Container(
+ width: double.infinity,
+ padding: const EdgeInsets.symmetric(vertical: 4),
+ child: PostBody(
+ text: post.body,
+ bodyFormat: post.bodyFormat,
+ backgroundId: post.backgroundId,
+ mode: mode,
+ ),
+ ),
+ ),
+ ],
),
),
- ),
+ ] else ...[
+ InkWell(
+ onTap: onTap,
+ borderRadius: BorderRadius.circular(AppTheme.radiusMd),
+ child: Container(
+ width: double.infinity,
+ padding: const EdgeInsets.symmetric(vertical: 4),
+ child: PostBody(
+ text: post.body,
+ bodyFormat: post.bodyFormat,
+ backgroundId: post.backgroundId,
+ mode: mode,
+ ),
+ ),
+ ),
+ ],
],
),
),
@@ -201,10 +241,78 @@ class sojornPostCard extends StatelessWidget {
(post.thumbnailUrl != null && post.thumbnailUrl!.isNotEmpty) ||
(post.videoUrl != null && post.videoUrl!.isNotEmpty)) ...[
const SizedBox(height: 12),
- PostMedia(
- post: post,
- mode: mode,
- onTap: onTap,
+ if (post.isNsfw && !_nsfwRevealed) ...[
+ ClipRect(
+ child: ImageFiltered(
+ imageFilter: ImageFilter.blur(sigmaX: 20, sigmaY: 20),
+ child: PostMedia(
+ post: post,
+ mode: mode,
+ onTap: null,
+ ),
+ ),
+ ),
+ ] else ...[
+ PostMedia(
+ post: post,
+ mode: mode,
+ onTap: onTap,
+ ),
+ ],
+ ],
+
+ // NSFW warning banner with tap-to-reveal
+ if (post.isNsfw && !_nsfwRevealed) ...[
+ GestureDetector(
+ onTap: () => setState(() => _nsfwRevealed = true),
+ child: Container(
+ width: double.infinity,
+ margin: EdgeInsets.symmetric(horizontal: _padding.left, vertical: 8),
+ padding: const EdgeInsets.symmetric(vertical: 12, horizontal: 16),
+ decoration: BoxDecoration(
+ color: Colors.amber.shade800.withValues(alpha: 0.15),
+ borderRadius: BorderRadius.circular(12),
+ border: Border.all(color: Colors.amber.shade700.withValues(alpha: 0.3)),
+ ),
+ child: Column(
+ children: [
+ Row(
+ mainAxisAlignment: MainAxisAlignment.center,
+ children: [
+ Icon(Icons.visibility_off, size: 16, color: Colors.amber.shade700),
+ const SizedBox(width: 6),
+ Text(
+ 'Sensitive Content',
+ style: TextStyle(
+ fontWeight: FontWeight.w700,
+ fontSize: 13,
+ color: Colors.amber.shade700,
+ ),
+ ),
+ ],
+ ),
+ if (post.nsfwReason != null && post.nsfwReason!.isNotEmpty) ...[
+ const SizedBox(height: 4),
+ Text(
+ post.nsfwReason!,
+ style: TextStyle(
+ fontSize: 11,
+ color: Colors.amber.shade600,
+ ),
+ ),
+ ],
+ const SizedBox(height: 6),
+ Text(
+ 'Tap to reveal',
+ style: TextStyle(
+ fontSize: 11,
+ fontWeight: FontWeight.w500,
+ color: Colors.amber.shade600.withValues(alpha: 0.8),
+ ),
+ ),
+ ],
+ ),
+ ),
),
],