diff --git a/backend/src/middleware/auth.js b/backend/src/middleware/auth.js index dda569c..9d2eca2 100644 --- a/backend/src/middleware/auth.js +++ b/backend/src/middleware/auth.js @@ -12,8 +12,10 @@ const authenticateToken = async (req, res, next) => { }); } - // Log token for debugging (remove in production) - console.log('Token received:', token.substring(0, 20) + '...'); + // Log token prefix only in non-production for debugging + if (process.env.NODE_ENV !== 'production') { + console.log('Token received:', token.substring(0, 20) + '...'); + } try { const decoded = jwt.verify(token, process.env.JWT_SECRET); @@ -75,4 +77,4 @@ module.exports = { authenticateToken, requireAdmin, requireOwnership -}; \ No newline at end of file +}; diff --git a/backend/src/routes/applications.js b/backend/src/routes/applications.js index 88f6ff9..6a14d33 100644 --- a/backend/src/routes/applications.js +++ b/backend/src/routes/applications.js @@ -76,7 +76,7 @@ router.get('/plans', async (req, res, next) => { SUM(DISTINCT ls.area) as total_section_area, COUNT(DISTINCT aps.lawn_section_id) as section_count, p.id as property_id, p.name as property_name, p.address as property_address, - ue.custom_name as equipment_name, et.name as equipment_type + ue.id as equipment_id, ue.custom_name as equipment_name, et.name as equipment_type FROM application_plans ap JOIN application_plan_sections aps ON ap.id = aps.plan_id JOIN lawn_sections ls ON aps.lawn_section_id = ls.id @@ -84,7 +84,7 @@ router.get('/plans', async (req, res, next) => { LEFT JOIN user_equipment ue ON ap.equipment_id = ue.id LEFT JOIN equipment_types et ON ue.equipment_type_id = et.id WHERE ${whereClause} - GROUP BY ap.id, p.id, p.name, p.address, ue.custom_name, et.name + GROUP BY ap.id, p.id, p.name, p.address, ue.id, ue.custom_name, et.name ORDER BY ap.planned_date DESC, ap.created_at DESC`, queryParams ); @@ -144,46 +144,37 @@ router.get('/plans', async (req, res, next) => { })); // Only get spreader settings for granular 
applications with equipment - if (plan.equipment_name && productsResult.rows.length > 0) { + if (plan.equipment_id && productsResult.rows.length > 0) { const firstProduct = productsResult.rows[0]; - console.log('Checking spreader settings for plan:', { - planId: plan.id, - equipmentName: plan.equipment_name, - productInfo: { - productId: firstProduct.product_id, - userProductId: firstProduct.user_product_id, - sharedName: firstProduct.shared_name, - productType: firstProduct.product_type - } - }); + if (process.env.NODE_ENV !== 'production') { + console.log('Checking spreader settings for plan:', { + planId: plan.id, + equipmentId: plan.equipment_id, + productInfo: { + productId: firstProduct.product_id, + userProductId: firstProduct.user_product_id, + sharedName: firstProduct.shared_name, + productType: firstProduct.product_type + } + }); + } const productType = firstProduct.product_type; - console.log('Detected product type:', productType); + if (process.env.NODE_ENV !== 'production') { + console.log('Detected product type:', productType); + } if (productType === 'granular') { - // Get equipment ID - const equipmentResult = await pool.query( - 'SELECT id FROM user_equipment WHERE custom_name = $1 AND user_id = $2', - [plan.equipment_name, req.user.id] + // Use equipment_id directly to avoid name-based lookup + const equipmentId = plan.equipment_id; + spreaderSetting = await getSpreaderSettingsForEquipment( + equipmentId, + firstProduct.product_id, + firstProduct.user_product_id, + req.user.id ); - - console.log('Equipment lookup result:', { - equipmentName: plan.equipment_name, - userId: req.user.id, - foundEquipment: equipmentResult.rows.length > 0, - equipmentId: equipmentResult.rows[0]?.id - }); - - if (equipmentResult.rows.length > 0) { - const equipmentId = equipmentResult.rows[0].id; - spreaderSetting = await getSpreaderSettingsForEquipment( - equipmentId, - firstProduct.product_id, - firstProduct.user_product_id, - req.user.id - ); - + if 
(process.env.NODE_ENV !== 'production') { console.log('Spreader setting lookup result:', { equipmentId, productId: firstProduct.product_id, @@ -207,6 +198,7 @@ router.get('/plans', async (req, res, next) => { propertyId: plan.property_id, // Add property ID for frontend to use propertyName: plan.property_name, propertyAddress: plan.property_address, + equipmentId: plan.equipment_id, equipmentName: plan.equipment_name || plan.equipment_type, productCount: parseInt(planProductInfo.product_count || 0), totalProductAmount: parseFloat(planProductInfo.total_product_amount || 0), @@ -1491,4 +1483,4 @@ router.get('/logs', async (req, res, next) => { } }); -module.exports = router; \ No newline at end of file +module.exports = router; diff --git a/database/migrations/add_core_indexes.sql b/database/migrations/add_core_indexes.sql new file mode 100644 index 0000000..3c4cd29 --- /dev/null +++ b/database/migrations/add_core_indexes.sql @@ -0,0 +1,26 @@ +-- Performance indexes for common queries and joins + +-- Application plans +CREATE INDEX IF NOT EXISTS idx_application_plans_user_id ON application_plans(user_id); +CREATE INDEX IF NOT EXISTS idx_application_plans_user_status ON application_plans(user_id, status); +CREATE INDEX IF NOT EXISTS idx_application_plans_user_planned_date ON application_plans(user_id, planned_date); + +-- Application plan products +CREATE INDEX IF NOT EXISTS idx_application_plan_products_plan_id ON application_plan_products(plan_id); + +-- Properties and lawn sections +CREATE INDEX IF NOT EXISTS idx_properties_user_id ON properties(user_id); +CREATE INDEX IF NOT EXISTS idx_lawn_sections_property_id ON lawn_sections(property_id); + +-- User equipment +CREATE INDEX IF NOT EXISTS idx_user_equipment_user_id ON user_equipment(user_id); + +-- User products and products +CREATE INDEX IF NOT EXISTS idx_user_products_user_id ON user_products(user_id); +CREATE INDEX IF NOT EXISTS idx_products_category_id ON products(category_id); + +-- Weather logs (if present) 
+-- CREATE INDEX IF NOT EXISTS idx_weather_logs_property_id ON weather_logs(property_id);
+
+SELECT 'Core indexes added' AS migration_status;
+
diff --git a/docker-compose.yml b/docker-compose.yml
index 76bf687..7bab655 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -80,5 +80,24 @@ services:
     restart: unless-stopped
 
   # Database should not be exposed to proxy network for security
+  flyway:
+    image: flyway/flyway:9
+    # Profile-gated: without this, `docker compose up` would start the one-shot
+    # container and run migrations implicitly on every deploy/restart.
+    profiles: ["tools"]
+    depends_on:
+      - db
+    environment:
+      - FLYWAY_URL=jdbc:postgresql://db:5432/${DB_NAME:-turftracker}
+      - FLYWAY_USER=${DB_USER:-turftracker}
+      - FLYWAY_PASSWORD=${DB_PASSWORD:-password123}
+      # Uncomment if you need to baseline an existing DB without schema history
+      # - FLYWAY_BASELINE_ON_MIGRATE=true
+    command: -locations=filesystem:/migrations migrate
+    volumes:
+      - ./database/migrations:/migrations:ro
+    networks:
+      - turftracker
+    # Not started automatically; run with: docker compose run --rm flyway migrate
+
 volumes:
-  postgres_data:
\ No newline at end of file
+  postgres_data:
diff --git a/frontend/src/services/api.js b/frontend/src/services/api.js
index 890cc62..49308f9 100644
--- a/frontend/src/services/api.js
+++ b/frontend/src/services/api.js
@@ -26,7 +26,10 @@ const apiClient = axios.create({
 apiClient.interceptors.request.use(
   (config) => {
     const token = localStorage.getItem('authToken');
-    console.log('Token from localStorage:', token);
+    if (process.env.NODE_ENV !== 'production') {
+      // Minimal visibility during development only
+      if (token) console.log('Auth token present');
+    }
     if (token && token !== 'undefined' && token !== 'null') {
       config.headers.Authorization = `Bearer ${token}`;
     }
@@ -248,4 +251,4 @@ export const formatApiResponse = (response) => {
 };
 
 // Export the configured axios instance for custom requests
-export default apiClient;
\ No newline at end of file
+export default apiClient;
diff --git a/scripts/deploy.sh b/scripts/deploy.sh
new file mode 100644
index 0000000..92e117e
--- /dev/null
+++ b/scripts/deploy.sh
@@
-0,0 +1,89 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+usage() {
+  cat <<'USAGE'
+Usage: scripts/deploy.sh -r <user@host> -d <remote_path> [-b <branch>] [-m]
+
+Options:
+  -r <user@host>    Remote SSH host in the form user@host (or host if configured in SSH config)
+  -d <remote_path>  Absolute path to the repo on the remote host
+  -b <branch>       Git branch to deploy (default: main)
+  -m                Run DB migrations via Flyway after building images
+
+Environment overrides:
+  TT_REMOTE_HOST, TT_REMOTE_PATH, TT_BRANCH, TT_RUN_MIGRATIONS (0/1)
+
+Examples:
+  scripts/deploy.sh -r ubuntu@myserver -d /opt/apps/turftracker -b main -m
+  TT_REMOTE_HOST=ubuntu@myserver TT_REMOTE_PATH=/opt/apps/turftracker scripts/deploy.sh
+USAGE
+}
+
+REMOTE_HOST="${TT_REMOTE_HOST:-}"
+REMOTE_PATH="${TT_REMOTE_PATH:-}"
+BRANCH="${TT_BRANCH:-main}"
+RUN_MIGRATIONS="${TT_RUN_MIGRATIONS:-0}"
+
+while getopts ":r:d:b:mh" opt; do
+  case $opt in
+    r) REMOTE_HOST="$OPTARG" ;;
+    d) REMOTE_PATH="$OPTARG" ;;
+    b) BRANCH="$OPTARG" ;;
+    m) RUN_MIGRATIONS="1" ;;
+    h) usage; exit 0 ;;
+    \?) echo "Invalid option: -$OPTARG" >&2; usage; exit 2 ;;
+    :) echo "Option -$OPTARG requires an argument." >&2; usage; exit 2 ;;
+  esac
+done
+
+if [[ -z "$REMOTE_HOST" || -z "$REMOTE_PATH" ]]; then
+  echo "Error: remote host and remote path are required" >&2
+  usage
+  exit 2
+fi
+
+echo "Deploying to $REMOTE_HOST:$REMOTE_PATH (branch: $BRANCH, migrations: $RUN_MIGRATIONS)"
+
+# Compose wrapper on remote: prefer v2 (`docker compose`), fallback to v1 (`docker-compose`)
+read -r -d '' REMOTE_SCRIPT <<'EOS' || true  # read -d '' exits non-zero at EOF; don't let set -e abort
+set -euo pipefail
+
+COMPOSE_CMD="docker compose"
+if ! $COMPOSE_CMD version >/dev/null 2>&1; then
+  if command -v docker-compose >/dev/null 2>&1; then
+    COMPOSE_CMD="docker-compose"
+  else
+    echo "docker compose or docker-compose not found on remote host" >&2
+    exit 1
+  fi
+fi
+
+cd "$REPO_PATH"
+echo "[remote] Using compose: $COMPOSE_CMD"
+echo "[remote] Updating repo to branch $BRANCH"
+git fetch --all --prune
+git checkout "$BRANCH"
+git pull --ff-only origin "$BRANCH"
+
+echo "[remote] Building services: backend, frontend"
+$COMPOSE_CMD build backend frontend
+
+if [[ "${RUN_MIGRATIONS}" == "1" ]]; then
+  echo "[remote] Running Flyway migrations"
+  # Ensure DB is reachable (optional: add a wait loop if needed)
+  $COMPOSE_CMD run --rm flyway -locations=filesystem:/migrations migrate
+fi
+
+echo "[remote] Restarting services"
+$COMPOSE_CMD up -d backend frontend
+
+echo "[remote] Deployment complete"
+EOS
+
+ssh -o BatchMode=yes "$REMOTE_HOST" \
+  REPO_PATH="$REMOTE_PATH" BRANCH="$BRANCH" RUN_MIGRATIONS="$RUN_MIGRATIONS" \
+  "bash -s" <<< "$REMOTE_SCRIPT"
+
+echo "Done."