734 lines
23 KiB
JavaScript
734 lines
23 KiB
JavaScript
#!/usr/bin/env node
|
||
// =============================================================================
|
||
// AgenciaPsi — Database CLI
|
||
// =============================================================================
|
||
// Uso: node db.cjs <comando> [opcoes]
|
||
//
|
||
// Comandos:
|
||
// setup Instalação do zero (schema + fixes + seeds)
|
||
// backup Exporta backup com data atual
|
||
// restore [data] Restaura de um backup (ex: 2026-03-23)
|
||
// migrate Aplica migrations pendentes
|
||
// seed [grupo] Roda seeds (all|users|system|test_data)
|
||
// status Mostra estado atual do banco
|
||
// diff Compara schema atual vs último backup
|
||
// reset Reseta o banco e reinstala tudo
|
||
// verify Verifica integridade dos dados essenciais
|
||
// help Mostra ajuda
|
||
// =============================================================================
|
||
|
||
const { execSync, spawnSync } = require('child_process');
|
||
const fs = require('fs');
|
||
const path = require('path');
|
||
const crypto = require('crypto');
|
||
|
||
// ---------------------------------------------------------------------------
|
||
// Config
|
||
// ---------------------------------------------------------------------------
|
||
// Project root: the directory containing this script; all relative paths resolve from here.
const ROOT = __dirname;
// db.config.json supplies at least: container, database, user, schema,
// fixes[], seeds{users,system,test_data}, backupRetentionDays.
// NOTE(review): field list inferred from usages in this file — confirm against db.config.json.
const CONFIG = JSON.parse(fs.readFileSync(path.join(ROOT, 'db.config.json'), 'utf8'));
// Name of the Docker container running Postgres (local Supabase stack).
const CONTAINER = CONFIG.container;
const DB = CONFIG.database; // target database name
const USER = CONFIG.user; // role passed to psql / pg_dump via -U
|
||
|
||
// ---------------------------------------------------------------------------
|
||
// Colors (sem dependências externas)
|
||
// ---------------------------------------------------------------------------
|
||
// ANSI escape codes for terminal colors — avoids any external dependency.
const c = {
  reset: '\x1b[0m',
  bold: '\x1b[1m',
  red: '\x1b[31m',
  green: '\x1b[32m',
  yellow: '\x1b[33m',
  blue: '\x1b[34m',
  cyan: '\x1b[36m',
  gray: '\x1b[90m'
};

// Single output seam: every helper funnels through here (easy to stub in tests).
function log(msg) {
  console.log(msg);
}

// Builds a logger that prints a colored symbol, a reset, then the message.
const symbolLogger = (color, symbol) => (msg) => log(`${color}${symbol}${c.reset} ${msg}`);

const info = symbolLogger(c.cyan, 'ℹ'); // informational
const ok = symbolLogger(c.green, '✔'); // success
const warn = symbolLogger(c.yellow, '⚠'); // non-fatal problem
const err = symbolLogger(c.red, '✖'); // error
const step = symbolLogger(c.gray, ' →'); // indented sub-step (note leading space)

// Section banner surrounded by blank lines.
function title(msg) {
  log(`\n${c.bold}${c.blue}═══ ${msg} ═══${c.reset}\n`);
}
|
||
|
||
// ---------------------------------------------------------------------------
|
||
// Helpers
|
||
// ---------------------------------------------------------------------------
|
||
|
||
// True when `docker inspect` reports the configured container as running.
// Any failure (docker missing, timeout, null stdout) is treated as "not running".
function dockerRunning() {
  const inspectArgs = ['inspect', '-f', '{{.State.Running}}', CONTAINER];
  const spawnOpts = {
    encoding: 'utf8',
    timeout: 10000,
    stdio: ['pipe', 'pipe', 'pipe']
  };
  try {
    const probe = spawnSync('docker', inspectArgs, spawnOpts);
    return probe.stdout.trim() === 'true';
  } catch {
    return false;
  }
}
|
||
|
||
// Runs a single SQL statement inside the container via `docker exec … psql -c`.
// opts.tuples → -t (tuples only); opts.quiet → -q. Returns trimmed stdout.
function psql(sql, opts = {}) {
  // Escape double quotes so the statement survives the shell's "-c" quoting.
  const escaped = sql.replace(/"/g, '\\"');
  const tuplesFlag = opts.tuples ? '-t' : '';
  const quietFlag = opts.quiet ? '-q' : '';
  const cmd = `docker exec -i -e PGCLIENTENCODING=UTF8 ${CONTAINER} psql -U ${USER} -d ${DB} ${tuplesFlag} ${quietFlag} -c "${escaped}"`;
  const execOpts = {
    encoding: 'utf8',
    timeout: 30000,
    stdio: ['pipe', 'pipe', 'pipe'],
    env: { ...process.env, PYTHONIOENCODING: 'utf-8', LANG: 'C.UTF-8' }
  };
  return execSync(cmd, execOpts).trim();
}
|
||
|
||
// Pipes a SQL file into psql via stdin, prefixing a SET client_encoding so
// the session is guaranteed to run in UTF-8 regardless of server defaults.
function psqlFile(filePath) {
  const sqlBody = fs.readFileSync(path.resolve(filePath), 'utf8');
  const sessionSql = "SET client_encoding TO 'UTF8';\n" + sqlBody;
  const cmd = `docker exec -i -e PGCLIENTENCODING=UTF8 ${CONTAINER} psql -U ${USER} -d ${DB} -q`;
  const execOpts = {
    input: sessionSql,
    encoding: 'utf8',
    timeout: 120000,
    stdio: ['pipe', 'pipe', 'pipe'],
    env: { ...process.env, PYTHONIOENCODING: 'utf-8', LANG: 'C.UTF-8' }
  };
  return execSync(cmd, execOpts);
}
|
||
|
||
// Runs pg_dump inside the container with caller-supplied flags and returns
// the dump as a string (50 MB buffer cap for large databases).
function pgDump(args) {
  const cmd = `docker exec -e PGCLIENTENCODING=UTF8 ${CONTAINER} pg_dump -U ${USER} -d ${DB} ${args}`;
  const execOpts = {
    encoding: 'utf8',
    timeout: 120000,
    maxBuffer: 50 * 1024 * 1024
  };
  return execSync(cmd, execOpts);
}
|
||
|
||
// Current date in UTC, formatted YYYY-MM-DD (matches backup directory names).
function today() {
  const iso = new Date().toISOString();
  return iso.slice(0, 10);
}
|
||
|
||
// Short fingerprint of a file: first 16 hex chars of its SHA-256.
// Reads as UTF-8 text — fine here since it only hashes .sql files.
function fileHash(filePath) {
  const digest = crypto.createHash('sha256');
  digest.update(fs.readFileSync(filePath, 'utf8'));
  return digest.digest('hex').slice(0, 16);
}
|
||
|
||
// Creates a directory (and any missing parents); no-op when it already exists.
function ensureDir(dir) {
  if (fs.existsSync(dir)) return;
  fs.mkdirSync(dir, { recursive: true });
}
|
||
|
||
// Guard: aborts the process with start-up instructions when the Supabase
// container is not running. Returns silently when it is.
function requireDocker() {
  if (dockerRunning()) return;
  err(`Container "${CONTAINER}" não está rodando.`);
  log(`\n Inicie o Supabase primeiro:`);
  log(` ${c.cyan}npx supabase start${c.reset}\n`);
  process.exit(1);
}
|
||
|
||
// Lists backup directories under backups/, newest first.
// Only entries named exactly YYYY-MM-DD count; anything else is ignored.
function listBackups() {
  const backupsDir = path.join(ROOT, 'backups');
  if (!fs.existsSync(backupsDir)) return [];
  const datePattern = /^\d{4}-\d{2}-\d{2}$/;
  const entries = fs.readdirSync(backupsDir).filter((name) => datePattern.test(name));
  entries.sort();
  entries.reverse();
  return entries;
}
|
||
|
||
// ---------------------------------------------------------------------------
|
||
// Migration tracking table
|
||
// ---------------------------------------------------------------------------
|
||
|
||
// Creates the bookkeeping table that records which migrations, seeds and
// fixes have been applied. Safe to call repeatedly (IF NOT EXISTS).
function ensureMigrationTable() {
  const ddl = `
    CREATE TABLE IF NOT EXISTS _db_migrations (
      id SERIAL PRIMARY KEY,
      filename TEXT NOT NULL UNIQUE,
      hash TEXT NOT NULL,
      category TEXT NOT NULL DEFAULT 'migration',
      applied_at TIMESTAMPTZ NOT NULL DEFAULT now()
    );
  `;
  psql(ddl);
}
|
||
|
||
// Reads the applied-migration log, returning one object per row:
// { filename, hash, category, applied_at }. Empty array when nothing applied.
// Parses psql's pipe-separated tuple output (-t mode).
function getAppliedMigrations() {
  ensureMigrationTable();
  const raw = psql('SELECT filename, hash, category, applied_at::text FROM _db_migrations ORDER BY id;', { tuples: true });
  if (!raw) return [];
  const rows = [];
  for (const line of raw.split('\n')) {
    if (!line) continue;
    const [filename, hash, category, applied_at] = line.split('|').map((part) => part.trim());
    rows.push({ filename, hash, category, applied_at });
  }
  return rows;
}
|
||
|
||
// Upserts one applied file into _db_migrations.
// BUG FIX: the filename was previously interpolated as the literal string
// '$(unknown)', so every migration/seed/fix collapsed (via ON CONFLICT) onto
// a single bogus row and pending-migration detection never worked.
function recordMigration(filename, hash, category) {
  // Escape single quotes so the interpolated SQL stays valid for any filename.
  const safeName = filename.replace(/'/g, "''");
  psql(`INSERT INTO _db_migrations (filename, hash, category) VALUES ('${safeName}', '${hash}', '${category}') ON CONFLICT (filename) DO UPDATE SET hash = EXCLUDED.hash, applied_at = now();`);
}
|
||
|
||
// ---------------------------------------------------------------------------
|
||
// Commands
|
||
// ---------------------------------------------------------------------------
|
||
|
||
// Command registry: name → handler. The CLI entry at the bottom of the file
// dispatches process.argv[2] against this object.
const commands = {};
|
||
|
||
// ---- SETUP ----
|
||
// Full from-scratch install: schema → fixes → seeds, then records everything
// in _db_migrations, takes a post-setup backup and runs the integrity checks.
// Order matters: seeds assume the schema and fixes are already in place.
commands.setup = function () {
  title('Setup — Instalação do zero');
  requireDocker();

  // 1. Schema — the base DDL file named by CONFIG.schema.
  const schemaFile = path.join(ROOT, CONFIG.schema);
  if (!fs.existsSync(schemaFile)) {
    err(`Schema não encontrado: ${schemaFile}`);
    process.exit(1);
  }

  info('Aplicando schema...');
  psqlFile(schemaFile);
  ok('Schema aplicado');

  // 2. Fixes — files listed in CONFIG.fixes; missing files are skipped silently.
  // NOTE(review): the success message reports CONFIG.fixes.length, not the
  // number actually applied — confirm that is intended.
  info('Aplicando fixes...');
  for (const fix of CONFIG.fixes) {
    const fixPath = path.join(ROOT, 'fixes', fix);
    if (fs.existsSync(fixPath)) {
      step(fix);
      psqlFile(fixPath);
    }
  }
  ok(`${CONFIG.fixes.length} fixes aplicados`);

  // 3. Seeds — 'all' applies users + system (test_data stays opt-in).
  commands.seed('all');

  // 4. Migration bookkeeping table.
  ensureMigrationTable();

  // 5. Record seeds and fixes as applied so `status` and `migrate` see them.
  const allSeeds = [...CONFIG.seeds.users, ...CONFIG.seeds.system];
  for (const seed of allSeeds) {
    const seedPath = path.join(ROOT, 'seeds', seed);
    if (fs.existsSync(seedPath)) {
      recordMigration(seed, fileHash(seedPath), 'seed');
    }
  }
  for (const fix of CONFIG.fixes) {
    const fixPath = path.join(ROOT, 'fixes', fix);
    if (fs.existsSync(fixPath)) {
      recordMigration(fix, fileHash(fixPath), 'fix');
    }
  }

  ok('Setup completo!');
  log('');

  // 6. Auto-backup so a fresh install can always be restored.
  info('Criando backup pós-setup...');
  commands.backup();

  // 7. Verify essential row counts and views.
  commands.verify();
};
|
||
|
||
// ---- BACKUP ----
|
||
// Exports three dumps into backups/YYYY-MM-DD/ (overwriting same-day files):
// schema.sql (DDL only), data.sql (rows only, app schemas), full_dump.sql.
// Finishes by pruning backups older than CONFIG.backupRetentionDays.
commands.backup = function () {
  title('Backup');
  requireDocker();

  const date = today();
  const backupDir = path.join(ROOT, 'backups', date);
  ensureDir(backupDir);

  // Platform-managed schemas excluded from the data dump so a restore never
  // clobbers the infrastructure's own bookkeeping.
  const infraSchemas = ['storage', 'realtime', '_realtime', 'supabase_functions', 'extensions', 'graphql', 'graphql_public', 'pgsodium', 'vault', 'net', '_analytics'];
  const excludeFlags = infraSchemas.map((s) => `--exclude-schema=${s}`).join(' ');

  // [progress label, pg_dump arguments, output filename]
  const exportPlan = [
    ['Exportando schema...', '--schema-only --no-owner --no-privileges', 'schema.sql'],
    ['Exportando dados...', `--data-only --no-owner --no-privileges ${excludeFlags}`, 'data.sql'],
    ['Exportando dump completo...', '--no-owner --no-privileges', 'full_dump.sql']
  ];

  for (const [label, dumpArgs, outFile] of exportPlan) {
    step(label);
    fs.writeFileSync(path.join(backupDir, outFile), pgDump(dumpArgs));
  }

  ok(`Backup salvo em backups/${date}/`);
  for (const outFile of ['schema.sql', 'data.sql', 'full_dump.sql']) {
    const { size } = fs.statSync(path.join(backupDir, outFile));
    step(`${outFile}: ${(size / 1024).toFixed(0)}KB`);
  }

  // Prune backups past the retention window.
  cleanupBackups();
};
|
||
|
||
// Deletes backup directories older than CONFIG.backupRetentionDays.
// Relies on YYYY-MM-DD names comparing correctly as plain strings.
function cleanupBackups() {
  const cutoff = new Date();
  cutoff.setDate(cutoff.getDate() - CONFIG.backupRetentionDays);
  const threshold = cutoff.toISOString().slice(0, 10);

  const stale = listBackups().filter((name) => name < threshold);
  for (const name of stale) {
    fs.rmSync(path.join(ROOT, 'backups', name), { recursive: true, force: true });
  }

  if (stale.length > 0) {
    info(`${stale.length} backup(s) antigo(s) removido(s) (>${CONFIG.backupRetentionDays} dias)`);
  }
}
|
||
|
||
// ---- RESTORE ----
|
||
// Restores the database from backups/<date>/ (default: most recent backup).
// Prefers full_dump.sql; falls back to schema.sql + data.sql. Destructive:
// the public schema is dropped and recreated before applying the dump.
commands.restore = function (dateArg) {
  title('Restore');
  requireDocker();

  const backups = listBackups();
  if (backups.length === 0) {
    err('Nenhum backup encontrado.');
    process.exit(1);
  }

  // listBackups() sorts newest-first, so [0] is the most recent.
  const date = dateArg || backups[0];
  const dir = path.join(ROOT, 'backups', date);

  if (!fs.existsSync(dir)) {
    err(`Backup não encontrado: ${date}`);
    log(`\n Backups disponíveis:`);
    backups.forEach((b) => log(` ${c.cyan}${b}${c.reset}`));
    process.exit(1);
  }

  const fullDump = path.join(dir, 'full_dump.sql');
  const schemaFile = path.join(dir, 'schema.sql');
  const dataFile = path.join(dir, 'data.sql');

  // Safety backup before restore — best-effort; an empty/broken database must
  // not block the restore itself.
  // NOTE(review): the safety backup writes to backups/<today>/ — if the user
  // restores TODAY's backup it is overwritten with the current state first,
  // making the restore a no-op. Confirm whether that is intended.
  info('Criando backup de segurança antes do restore...');
  try {
    commands.backup();
  } catch {
    warn('Não foi possível criar backup de segurança (banco pode estar vazio)');
  }

  if (fs.existsSync(fullDump)) {
    info(`Restaurando de backups/${date}/full_dump.sql ...`);

    // Drop and recreate public so the dump applies onto a clean slate.
    step('Limpando schema public...');
    psql('DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO postgres; GRANT ALL ON SCHEMA public TO public;');

    step('Aplicando full dump...');
    psqlFile(fullDump);
  } else if (fs.existsSync(schemaFile) && fs.existsSync(dataFile)) {
    // Fallback path: apply DDL first, then the data rows.
    info(`Restaurando de backups/${date}/ (schema + data)...`);

    step('Limpando schema public...');
    psql('DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO postgres; GRANT ALL ON SCHEMA public TO public;');

    step('Aplicando schema...');
    psqlFile(schemaFile);

    step('Aplicando dados...');
    psqlFile(dataFile);
  } else {
    err(`Backup incompleto em ${date}/`);
    process.exit(1);
  }

  ok(`Banco restaurado de backups/${date}/`);

  // Confirm essential data survived the restore.
  commands.verify();
};
|
||
|
||
// ---- MIGRATE ----
|
||
// Applies every .sql file in migrations/ not yet recorded in _db_migrations,
// in lexicographic filename order. Takes a backup first; on any failure the
// run aborts immediately (already-applied files stay recorded) and the
// operator is pointed at `restore`.
commands.migrate = function () {
  title('Migrate');
  requireDocker();
  ensureMigrationTable();

  const migrationsDir = path.join(ROOT, 'migrations');
  if (!fs.existsSync(migrationsDir)) {
    info('Nenhuma pasta migrations/ encontrada.');
    return;
  }

  // Lexicographic sort — assumes filenames sort chronologically (e.g. a
  // date/sequence prefix). TODO confirm the project's naming scheme.
  const files = fs
    .readdirSync(migrationsDir)
    .filter((f) => f.endsWith('.sql'))
    .sort();

  if (files.length === 0) {
    info('Nenhuma migration encontrada.');
    return;
  }

  // Pending = on disk but never recorded. Files whose hash changed after
  // being applied are NOT re-applied (only the filename is compared).
  const applied = getAppliedMigrations().map((m) => m.filename);
  const pending = files.filter((f) => !applied.includes(f));

  if (pending.length === 0) {
    ok('Todas as migrations já foram aplicadas.');
    return;
  }

  // Auto-backup before migrating, so a failed run can be rolled back.
  info('Criando backup antes de migrar...');
  commands.backup();

  info(`${pending.length} migration(s) pendente(s):`);
  for (const file of pending) {
    step(`Aplicando ${file}...`);
    const filePath = path.join(migrationsDir, file);
    try {
      psqlFile(filePath);
      // Record only after a successful apply — a failed file stays pending.
      recordMigration(file, fileHash(filePath), 'migration');
      ok(` ${file}`);
    } catch (e) {
      err(` FALHA em ${file}: ${e.message}`);
      err('Migration abortada. Banco pode estar em estado parcial.');
      err('Use "node db.cjs restore" para voltar ao backup.');
      process.exit(1);
    }
  }

  ok(`${pending.length} migration(s) aplicada(s)`);
};
|
||
|
||
// ---- SEED ----
|
||
// Applies the seed SQL files for one group. 'all' deliberately expands to
// users + system only — test_data must be requested explicitly. Aborts the
// process on the first seed that fails.
commands.seed = function (group) {
  const validGroups = ['all', 'users', 'system', 'test_data'];
  if (!group) group = 'all';

  if (!validGroups.includes(group)) {
    err(`Grupo inválido: ${group}`);
    log(` Grupos válidos: ${validGroups.join(', ')}`);
    process.exit(1);
  }

  title(`Seeds — ${group}`);
  requireDocker();

  const targetGroups = group === 'all' ? ['users', 'system'] : [group];
  let appliedCount = 0;

  for (const groupName of targetGroups) {
    const seedFiles = CONFIG.seeds[groupName];
    if (!seedFiles || seedFiles.length === 0) continue;

    info(`Grupo: ${groupName}`);
    for (const seedFile of seedFiles) {
      const seedPath = path.join(ROOT, 'seeds', seedFile);
      if (!fs.existsSync(seedPath)) {
        warn(` Arquivo não encontrado: ${seedFile}`);
        continue;
      }
      step(seedFile);
      try {
        psqlFile(seedPath);
        appliedCount++;
      } catch (e) {
        err(` FALHA em ${seedFile}: ${e.stderr || e.message}`);
        process.exit(1);
      }
    }
  }

  ok(`${appliedCount} seed(s) aplicado(s)`);
};
|
||
|
||
// ---- STATUS ----
|
||
// Read-only report: container state, backups, applied/pending migrations,
// and row counts for the application's core tables.
commands.status = function () {
  title('Status');
  requireDocker();

  // Container — requireDocker() already guaranteed it is up.
  ok(`Container: ${CONTAINER} (rodando)`);

  // Backups
  const backups = listBackups();
  if (backups.length > 0) {
    ok(`Último backup: ${backups[0]}`);
    info(`Total de backups: ${backups.length}`);
  } else {
    warn('Nenhum backup encontrado');
  }

  // Migrations: show the last five applied, then anything still pending.
  try {
    const applied = getAppliedMigrations();
    if (applied.length > 0) {
      info(`Migrations aplicadas: ${applied.length}`);
      for (const entry of applied.slice(-5)) {
        step(`${entry.filename} ${c.gray}(${entry.category}, ${entry.applied_at})${c.reset}`);
      }
    }

    const migrationsDir = path.join(ROOT, 'migrations');
    if (fs.existsSync(migrationsDir)) {
      const appliedNames = new Set(applied.map((m) => m.filename));
      const pending = fs
        .readdirSync(migrationsDir)
        .filter((f) => f.endsWith('.sql'))
        .filter((f) => !appliedNames.has(f));
      if (pending.length > 0) {
        warn(`${pending.length} migration(s) pendente(s):`);
        pending.forEach((f) => step(`${c.yellow}${f}${c.reset}`));
      }
    }
  } catch {
    info('Tabela _db_migrations não existe (rode setup primeiro)');
  }

  // Row counts — green when non-empty, red when zero, gray when missing.
  log('');
  info('Dados no banco:');
  const countQueries = [
    ['auth.users', 'SELECT count(*) FROM auth.users'],
    ['profiles', 'SELECT count(*) FROM profiles'],
    ['tenants', 'SELECT count(*) FROM tenants'],
    ['plans', 'SELECT count(*) FROM plans'],
    ['features', 'SELECT count(*) FROM features'],
    ['plan_features', 'SELECT count(*) FROM plan_features'],
    ['subscriptions', 'SELECT count(*) FROM subscriptions'],
    ['email_templates_global', 'SELECT count(*) FROM email_templates_global'],
    ['notification_templates', 'SELECT count(*) FROM notification_templates']
  ];

  for (const [label, sql] of countQueries) {
    try {
      const count = psql(sql, { tuples: true }).trim();
      const color = parseInt(count) > 0 ? c.green : c.red;
      step(`${label}: ${color}${count}${c.reset}`);
    } catch {
      step(`${label}: ${c.gray}(tabela não existe)${c.reset}`);
    }
  }
};
|
||
|
||
// ---- DIFF ----
|
||
// Coarse table-level diff: compares CREATE TABLE bodies in the live schema
// against the latest backup's schema.sql and reports added/removed/changed.
commands.diff = function () {
  title('Diff — Schema');
  requireDocker();

  const backups = listBackups();
  if (backups.length === 0) {
    err('Nenhum backup para comparar. Rode "node db.cjs backup" primeiro.');
    process.exit(1);
  }

  const lastBackup = backups[0];
  const lastSchemaPath = path.join(ROOT, 'backups', lastBackup, 'schema.sql');
  if (!fs.existsSync(lastSchemaPath)) {
    err(`Schema não encontrado no backup ${lastBackup}`);
    process.exit(1);
  }

  info('Exportando schema atual...');
  const currentSchema = pgDump('--schema-only --no-owner --no-privileges');
  const lastSchema = fs.readFileSync(lastSchemaPath, 'utf8');

  // Naive regex parse: maps table name → body text. NOTE(review): a body
  // containing ');' (e.g. inside some CHECK expressions) would be truncated
  // early — acceptable for a coarse diff, but confirm if false positives show up.
  function extractTables(sql) {
    const tables = {};
    const regex = /CREATE TABLE (?:IF NOT EXISTS )?(\S+)\s*\(([\s\S]*?)\);/g;
    let match;
    while ((match = regex.exec(sql)) !== null) {
      tables[match[1]] = match[2].trim();
    }
    return tables;
  }

  const currentTables = extractTables(currentSchema);
  const lastTables = extractTables(lastSchema);
  const tableNames = [...new Set([...Object.keys(currentTables), ...Object.keys(lastTables)])].sort();

  const stats = { added: 0, removed: 0, changed: 0, unchanged: 0 };
  for (const table of tableNames) {
    if (!lastTables[table]) {
      log(` ${c.green}+ ${table}${c.reset} (nova)`);
      stats.added++;
    } else if (!currentTables[table]) {
      log(` ${c.red}- ${table}${c.reset} (removida)`);
      stats.removed++;
    } else if (currentTables[table] !== lastTables[table]) {
      log(` ${c.yellow}~ ${table}${c.reset} (alterada)`);
      stats.changed++;
    } else {
      stats.unchanged++;
    }
  }

  log('');
  ok(`Comparado com backup de ${lastBackup}:`);
  step(`${stats.added} nova(s), ${stats.changed} alterada(s), ${stats.removed} removida(s), ${stats.unchanged} sem mudança`);
};
|
||
|
||
// ---- RESET ----
|
||
// Destructive: drops the public schema entirely and reinstalls via setup.
// Only `public` is dropped — other schemas (auth, storage, …) are untouched.
commands.reset = function () {
  title('Reset — CUIDADO');
  requireDocker();

  // Safety backup — best-effort; an un-dumpable (e.g. empty) database must
  // not block the reset itself.
  info('Criando backup antes do reset...');
  try {
    commands.backup();
  } catch {
    warn('Não foi possível criar backup');
  }

  warn('Resetando schema public...');
  psql('DROP SCHEMA IF EXISTS public CASCADE; CREATE SCHEMA public; GRANT ALL ON SCHEMA public TO postgres; GRANT ALL ON SCHEMA public TO public;');
  ok('Schema public resetado');

  // Re-run the full install (schema + fixes + seeds + backup + verify).
  commands.setup();
};
|
||
|
||
// ---- VERIFY ----
|
||
// Integrity check: asserts minimum row counts for essential tables plus the
// existence of the entitlements view. Reports pass/fail totals; never exits.
commands.verify = function () {
  title('Verificação de integridade');
  requireDocker();

  // Minimum expected row counts for a healthy install.
  const checks = [
    { name: 'auth.users', sql: 'SELECT count(*) FROM auth.users', min: 1 },
    { name: 'profiles', sql: 'SELECT count(*) FROM profiles', min: 1 },
    { name: 'tenants', sql: 'SELECT count(*) FROM tenants', min: 1 },
    { name: 'plans', sql: 'SELECT count(*) FROM plans', min: 7 },
    { name: 'features', sql: 'SELECT count(*) FROM features', min: 20 },
    { name: 'plan_features', sql: 'SELECT count(*) FROM plan_features', min: 50 },
    { name: 'subscriptions', sql: 'SELECT count(*) FROM subscriptions', min: 1 },
    { name: 'email_templates', sql: 'SELECT count(*) FROM email_templates_global', min: 10 }
  ];

  let pass = 0;
  let fail = 0;

  for (const { name, sql, min } of checks) {
    try {
      const count = parseInt(psql(sql, { tuples: true }).trim());
      if (count >= min) {
        ok(`${name}: ${count} (mín: ${min})`);
        pass++;
      } else {
        err(`${name}: ${count} (esperado ≥ ${min})`);
        fail++;
      }
    } catch {
      // A failing query here means the table itself is missing.
      err(`${name}: tabela não existe`);
      fail++;
    }
  }

  // The entitlements view is checked separately (view, not a table; no minimum).
  try {
    const entCount = psql('SELECT count(*) FROM v_tenant_entitlements;', { tuples: true }).trim();
    ok(`v_tenant_entitlements: ${entCount} registros`);
    pass++;
  } catch {
    err('v_tenant_entitlements: view não existe');
    fail++;
  }

  log('');
  if (fail === 0) {
    ok(`${c.bold}Todos os ${pass} checks passaram!${c.reset}`);
  } else {
    err(`${fail} check(s) falharam, ${pass} passaram`);
  }
};
|
||
|
||
// ---- HELP ----
|
||
// Prints CLI usage. Plain template literal — kept in sync manually with the
// handlers registered on `commands` above.
commands.help = function () {
  log(`
${c.bold}AgenciaPsi — Database CLI${c.reset}

${c.cyan}Uso:${c.reset} node db.cjs <comando> [opções]

${c.cyan}Comandos:${c.reset}

  ${c.bold}setup${c.reset}            Instalação do zero (schema + fixes + seeds)
                   Cria backup automático após concluir

  ${c.bold}backup${c.reset}           Exporta banco para backups/YYYY-MM-DD/
                   Gera: schema.sql, data.sql, full_dump.sql

  ${c.bold}restore [data]${c.reset}   Restaura de um backup
                   Sem data = último backup disponível
                   Ex: node db.cjs restore 2026-03-23

  ${c.bold}migrate${c.reset}          Aplica migrations pendentes (pasta migrations/)
                   Backup automático antes de aplicar

  ${c.bold}seed [grupo]${c.reset}     Roda seeds (all, users, system, test_data)
                   Ex: node db.cjs seed system

  ${c.bold}status${c.reset}           Mostra estado do banco, backups, migrations

  ${c.bold}diff${c.reset}             Compara schema atual vs último backup

  ${c.bold}reset${c.reset}            Reseta o banco e reinstala tudo do zero
                   ${c.yellow}⚠ Cria backup antes de resetar${c.reset}

  ${c.bold}verify${c.reset}           Verifica integridade dos dados essenciais

  ${c.bold}help${c.reset}             Mostra esta ajuda

${c.cyan}Exemplos:${c.reset}

  ${c.gray}# Primeira vez — instala tudo${c.reset}
  node db.cjs setup

  ${c.gray}# Backup diário${c.reset}
  node db.cjs backup

  ${c.gray}# Perdi o banco — restaurar${c.reset}
  node db.cjs restore

  ${c.gray}# Nova migration${c.reset}
  node db.cjs migrate

  ${c.gray}# Ver o que tem no banco${c.reset}
  node db.cjs status
`);
};
|
||
|
||
// ---------------------------------------------------------------------------
|
||
// Main
|
||
// ---------------------------------------------------------------------------
|
||
|
||
// CLI dispatch: argv[2] selects the handler; remaining args are forwarded.
const [, , cmd, ...args] = process.argv;

// No command or an explicit help flag → print usage and exit cleanly.
const wantsHelp = !cmd || ['help', '--help', '-h'].includes(cmd);
if (wantsHelp) {
  commands.help();
  process.exit(0);
}

if (!commands[cmd]) {
  err(`Comando desconhecido: ${cmd}`);
  log(` Use ${c.cyan}node db.cjs help${c.reset} para ver os comandos disponíveis.`);
  process.exit(1);
}

// Handlers exit(1) themselves on expected failures; this catch is the last
// line of defense for unexpected errors (DEBUG=1 prints the full stack).
try {
  commands[cmd](...args);
} catch (e) {
  err(`Erro: ${e.message}`);
  if (process.env.DEBUG) console.error(e);
  process.exit(1);
}
|