This guide will walk you through setting up SafeMind from scratch.
Before you begin, ensure you have:
Purpose: Toxicity and harmful content detection
Free Tier: rate-limited to 1 request per second (no cap on total requests)
Purpose: Comprehensive content moderation
Free Tier: $5 credit for new users (Moderation API is free to use)
Purpose: Additional ML models
Free Tier: Limited inference API calls
Advantages:
Best for: Full-stack developers, teams wanting unified codebase
Advantages:
Best for: Data scientists, ML engineers, Python experts
# Create project directory
mkdir safemind-app
cd safemind-app
# Initialize package manager (generates a default package.json)
npm init -y
# Install pnpm (faster alternative to npm)
npm install -g pnpm
# Create workspace structure: Next.js frontend, Express backend, shared code
mkdir -p apps/web apps/api packages/shared
# Initialize Next.js frontend (TypeScript + Tailwind, app router, no src/ dir)
cd apps/web
pnpm create next-app@latest . --typescript --tailwind --app --no-src-dir
# Initialize Express backend
cd ../api
pnpm init
# Runtime deps: web framework, CORS, security headers, env loading, validation
pnpm add express cors helmet dotenv zod
# Dev deps: TypeScript toolchain, Node/Express types, and the tsx runner
pnpm add -D typescript @types/node @types/express tsx
# Return to root
cd ../..
# Create project directory
mkdir safemind-app
cd safemind-app
# Create virtual environment (isolates project dependencies)
python -m venv venv
source venv/bin/activate # On Windows: venv\Scripts\activate
# Create project structure: FastAPI backend + tests, React frontend
mkdir -p api/src api/tests frontend
# Initialize backend
cd api
# Core web stack: framework, ASGI server, env loading, request models
pip install fastapi uvicorn python-dotenv pydantic
# API clients / ML libraries used by the analyzer
pip install openai google-cloud-aiplatform transformers
# Pin installed versions for reproducible installs
pip freeze > requirements.txt
# Initialize frontend (using React with Vite)
cd ../frontend
npm create vite@latest . -- --template react-ts
npm install
Create .env file in your API directory:
# API Server
PORT=3001
NODE_ENV=development
# API Keys (replace each placeholder with your real key)
PERSPECTIVE_API_KEY=your_perspective_api_key_here
OPENAI_API_KEY=your_openai_api_key_here
HUGGINGFACE_API_KEY=your_hf_key_here # Optional
# Database (Supabase) — copy these from your Supabase project settings
DATABASE_URL=your_supabase_postgres_url
SUPABASE_URL=https://your-project.supabase.co
SUPABASE_ANON_KEY=your_anon_key
SUPABASE_SERVICE_ROLE_KEY=your_service_role_key # Keep secret: bypasses row-level security
# Notifications (setup later)
SENDGRID_API_KEY=your_sendgrid_key # For email
TWILIO_ACCOUNT_SID=your_twilio_sid # For SMS
TWILIO_AUTH_TOKEN=your_twilio_token
TWILIO_PHONE_NUMBER=+1234567890
# Security — generate random values, e.g. with: openssl rand -hex 32
JWT_SECRET=generate_a_random_secret_here
ENCRYPTION_KEY=generate_32_byte_key_here
# Rate Limiting (window length in ms, max requests per window)
RATE_LIMIT_WINDOW_MS=60000
RATE_LIMIT_MAX_REQUESTS=100
Important: Add .env to your .gitignore file!
See PROJECT_STRUCTURE.md for the full directory layout, and store your Supabase Postgres connection string in .env as DATABASE_URL. Create apps/api/src/index.ts:
import express from 'express';
import cors from 'cors';
import helmet from 'helmet';
import dotenv from 'dotenv';

// Load .env before reading any process.env values below.
dotenv.config();

const app = express();
const PORT = process.env.PORT || 3001;

// Middleware
app.use(helmet());       // security-related HTTP headers
app.use(cors());         // allow the frontend (different origin) to call this API
app.use(express.json()); // parse JSON request bodies

// Health check — lets monitors and the frontend verify the API is up.
app.get('/health', (req, res) => {
  res.json({ status: 'ok', timestamp: new Date() });
});

// Analyze endpoint — accepts { text: string }, returns a risk analysis.
app.post('/api/analyze', async (req, res) => {
  const { text } = req.body;

  // Reject missing or non-string input with a 400 before doing any work.
  if (!text || typeof text !== 'string') {
    return res.status(400).json({ error: 'Text is required' });
  }

  // Fail explicitly when the server is misconfigured, instead of using
  // non-null assertions (`!`) that would pass `undefined` to the analyzer.
  const perspectiveKey = process.env.PERSPECTIVE_API_KEY;
  const openaiKey = process.env.OPENAI_API_KEY;
  if (!perspectiveKey || !openaiKey) {
    console.error('Missing PERSPECTIVE_API_KEY or OPENAI_API_KEY');
    return res.status(500).json({
      success: false,
      error: 'Analysis failed'
    });
  }

  try {
    // Import your analyzer (lazy-loaded so the server can start without it)
    const { CombinedContentAnalyzer } = await import('./services/analyzer');
    const analyzer = new CombinedContentAnalyzer(perspectiveKey, openaiKey);

    const result = await analyzer.analyzeMessage(text);

    res.json({
      success: true,
      analysis: result
    });
  } catch (error) {
    // Log full details server-side; return a generic message to the client.
    console.error('Analysis error:', error);
    res.status(500).json({
      success: false,
      error: 'Analysis failed'
    });
  }
});

app.listen(PORT, () => {
  console.log(`SafeMind API running on port ${PORT}`);
});
Run it:
npx tsx src/index.ts
Create api/src/main.py:
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from dotenv import load_dotenv
# Fix: the original called datetime.now() in /health without importing it,
# which raised NameError at runtime.
from datetime import datetime
import os

# Load .env before any os.getenv() calls below.
load_dotenv()

app = FastAPI(title="SafeMind API")


class AnalyzeRequest(BaseModel):
    # Message text to analyze for risk signals.
    text: str


class AnalyzeResponse(BaseModel):
    # True when the analysis completed without errors.
    success: bool
    # Analyzer output (risk score, level, flags, suggested actions).
    analysis: dict


@app.get("/health")
async def health_check():
    """Liveness probe: confirms the API is up and reports the current time."""
    return {"status": "ok", "timestamp": datetime.now()}


@app.post("/api/analyze", response_model=AnalyzeResponse)
async def analyze_text(request: AnalyzeRequest):
    """Analyze a message and return its risk assessment.

    Raises HTTP 400 for empty text and HTTP 500 when the analyzer fails.
    """
    if not request.text:
        raise HTTPException(status_code=400, detail="Text is required")
    try:
        # Import your analyzer (local module, lazy-imported)
        from services.analyzer import CombinedContentAnalyzer
        analyzer = CombinedContentAnalyzer(
            perspective_api_key=os.getenv("PERSPECTIVE_API_KEY"),
            openai_api_key=os.getenv("OPENAI_API_KEY")
        )
        result = await analyzer.analyze_message(request.text)
        return {
            "success": True,
            "analysis": result
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=3001)
Run it:
python src/main.py
# Send a sample message to the running API's analyze endpoint
curl -X POST http://localhost:3001/api/analyze \
-H "Content-Type: application/json" \
-d '{"text": "I am feeling really down today"}'
You can also send a POST request to http://localhost:3001/api/analyze (e.g. from Postman) with the JSON body:
{
"text": "I am feeling really down today"
}
{
"success": true,
"analysis": {
"riskScore": 35.2,
"riskLevel": "medium",
"flags": {
"toxicity": false,
"selfHarm": false,
"mentalHealthConcern": true
},
"requiresAlert": false,
"suggestedActions": [
"Monitor conversation closely"
]
}
}
Create apps/web/app/dashboard/page.tsx:
'use client';

import { useState } from 'react';

// Shape of the analysis object returned by POST /api/analyze.
// Replaces the original `useState<any>`, which disabled type checking.
interface Analysis {
  riskScore: number;
  riskLevel: 'low' | 'medium' | 'high' | 'critical';
  flags: {
    toxicity: boolean;
    selfHarm: boolean;
    mentalHealthConcern: boolean;
  };
  requiresAlert: boolean;
  suggestedActions: string[];
}

export default function Dashboard() {
  const [text, setText] = useState('');
  const [analysis, setAnalysis] = useState<Analysis | null>(null);
  const [loading, setLoading] = useState(false);

  // POST the textarea contents to the API and store the returned analysis.
  const analyzeText = async () => {
    setLoading(true);
    try {
      const response = await fetch('http://localhost:3001/api/analyze', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ text }),
      });
      // Route non-2xx responses through the catch block so failures are
      // logged instead of silently setting analysis to undefined.
      if (!response.ok) {
        throw new Error(`API responded with status ${response.status}`);
      }
      const data = await response.json();
      setAnalysis(data.analysis ?? null);
    } catch (error) {
      console.error('Error:', error);
    } finally {
      setLoading(false);
    }
  };

  return (
    <div className="container mx-auto p-8">
      <h1 className="text-3xl font-bold mb-8">SafeMind Monitor</h1>
      <div className="mb-6">
        <textarea
          className="w-full p-4 border rounded-lg"
          rows={6}
          placeholder="Enter text to analyze..."
          value={text}
          onChange={(e) => setText(e.target.value)}
        />
        <button
          className="mt-4 px-6 py-2 bg-blue-600 text-white rounded-lg"
          onClick={analyzeText}
          disabled={loading || !text}
        >
          {loading ? 'Analyzing...' : 'Analyze'}
        </button>
      </div>
      {analysis && (
        <div className="border rounded-lg p-6">
          <h2 className="text-xl font-semibold mb-4">Analysis Results</h2>
          <div className="mb-4">
            <span className="font-medium">Risk Level: </span>
            <span className={`
              px-3 py-1 rounded
              ${analysis.riskLevel === 'critical' ? 'bg-red-500 text-white' : ''}
              ${analysis.riskLevel === 'high' ? 'bg-orange-500 text-white' : ''}
              ${analysis.riskLevel === 'medium' ? 'bg-yellow-500 text-white' : ''}
              ${analysis.riskLevel === 'low' ? 'bg-green-500 text-white' : ''}
            `}>
              {analysis.riskLevel.toUpperCase()}
            </span>
          </div>
          <div className="mb-4">
            <span className="font-medium">Risk Score: </span>
            <span>{analysis.riskScore.toFixed(1)}/100</span>
          </div>
          {analysis.requiresAlert && (
            <div className="bg-red-100 border border-red-400 text-red-700 px-4 py-3 rounded mb-4">
              <strong>Alert Required!</strong>
              <ul className="mt-2 list-disc list-inside">
                {analysis.suggestedActions.map((action: string, i: number) => (
                  <li key={i}>{action}</li>
                ))}
              </ul>
            </div>
          )}
        </div>
      )}
    </div>
  );
}
Run the frontend:
cd apps/web
pnpm dev
Visit: http://localhost:3000/dashboard
# Test Perspective API (replace YOUR_KEY; expect HTTP 200 with TOXICITY scores)
curl "https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze?key=YOUR_KEY" \
-H "Content-Type: application/json" \
-d '{"comment": {"text": "test"}, "requestedAttributes": {"TOXICITY": {}}}'
# Test OpenAI API (Moderation endpoint; expect HTTP 200 with category flags)
curl https://api.openai.com/v1/moderations \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_KEY" \
-d '{"input": "test"}'
# Find process using port 3001 (use when the server fails with EADDRINUSE)
lsof -i :3001
# Kill process (replace <PID> with the PID shown by lsof)
kill -9 <PID>
Double-check the values in your .env file. If you encounter issues:
Ready to make a difference! You’re building an important tool to help keep youth safe in AI spaces.