-- Example: Simple SELECT query
SELECT * FROM users;
1.2 Database Concepts
-- Creating a table
CREATE TABLE users (
id INT PRIMARY KEY,
name VARCHAR(50),
email VARCHAR(100)
);
1.3 SQL History and Standards
-- SQL is standardized but varies slightly by system
-- Example: MySQL, PostgreSQL, SQL Server syntax differences
1.4 Setting Up a Database
-- MySQL: login and create database
mysql -u root -p
CREATE DATABASE sample_db;
1.5 Basic SQL Syntax
-- SQL statements end with a semicolon
SELECT column1, column2 FROM table_name;
1.6 SQL Data Types
CREATE TABLE products (
id INT,
name VARCHAR(100),
price DECIMAL(10,2),
created_at DATE
);
1.7 Introduction to SQL Clients
-- Example: Use MySQL Workbench or psql command line
mysql -u user -p
1.8 Running Your First Query
SELECT NOW();
1.9 Understanding Query Results
-- Example output:
-- +---------------------+
-- | NOW()               |
-- +---------------------+
-- | 2025-07-06 10:00:00 |
-- +---------------------+
1.10 SQL Best Practices
-- Use uppercase for SQL keywords
SELECT name FROM users WHERE id = 1;
SELECT * FROM customers;
2.2 Selecting Specific Columns
SELECT name, email FROM customers;
2.3 Filtering Rows with WHERE
SELECT * FROM orders WHERE amount > 100;
2.4 Using Logical Operators
SELECT * FROM users WHERE age > 18 AND country = 'USA';
2.5 Sorting Results with ORDER BY
SELECT * FROM products ORDER BY price DESC;
2.6 Limiting Results with LIMIT
SELECT * FROM logs ORDER BY created_at DESC LIMIT 10;
2.7 Using DISTINCT
SELECT DISTINCT country FROM customers;
2.8 Using Aliases
SELECT name AS customer_name FROM customers;
2.9 Combining Filters (IN, BETWEEN)
SELECT * FROM orders WHERE status IN ('pending', 'shipped');
SELECT * FROM sales WHERE date BETWEEN '2025-01-01' AND '2025-01-31';
2.10 Handling NULL Values
SELECT * FROM users WHERE phone IS NOT NULL;
INSERT INTO users (name, email) VALUES ('John Doe', 'john@example.com');
3.2 Inserting Multiple Rows
INSERT INTO products (name, price) VALUES
('Product A', 10.99),
('Product B', 15.49);
3.3 INSERT with SELECT
INSERT INTO archive_orders SELECT * FROM orders WHERE status = 'completed';
3.4 UPDATE Statement Basics
UPDATE users SET email = 'newemail@example.com' WHERE id = 1;
3.5 Updating Multiple Columns
UPDATE products SET price = price * 1.1, updated_at = NOW() WHERE category = 'books';
3.6 DELETE Statement Basics
DELETE FROM sessions WHERE last_active < NOW() - INTERVAL 30 DAY;
3.7 DELETE with JOIN (Some SQL flavors)
DELETE orders FROM orders
JOIN customers ON orders.customer_id = customers.id
WHERE customers.status = 'inactive';
3.8 Using Transactions for Safety
BEGIN;
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
UPDATE accounts SET balance = balance + 100 WHERE id = 2;
COMMIT;
3.9 Auto-Increment Columns
CREATE TABLE users (
id INT AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(50)
);
3.10 RETURNING Clause (Postgres)
UPDATE users SET active = FALSE WHERE last_login < '2024-01-01' RETURNING id, name;
CREATE TABLE users (
id INT PRIMARY KEY,
name VARCHAR(50)
);
4.2 Foreign Keys
CREATE TABLE orders (
id INT PRIMARY KEY,
user_id INT,
FOREIGN KEY (user_id) REFERENCES users(id)
);
4.3 Unique Constraints
ALTER TABLE users ADD CONSTRAINT unique_email UNIQUE(email);
4.4 NOT NULL Constraint
CREATE TABLE products (
id INT PRIMARY KEY,
name VARCHAR(100) NOT NULL
);
4.5 Default Values
CREATE TABLE users (
id INT PRIMARY KEY,
status VARCHAR(10) DEFAULT 'active'
);
4.6 CHECK Constraints
ALTER TABLE products ADD CONSTRAINT check_price CHECK (price >= 0);
4.7 Indexes
CREATE INDEX idx_name ON users(name);
4.8 Composite Keys
CREATE TABLE enrollment (
student_id INT,
course_id INT,
PRIMARY KEY (student_id, course_id)
);
4.9 Auto-Increment & Sequences
CREATE SEQUENCE user_seq START 1;
4.10 Dropping Constraints
ALTER TABLE users DROP CONSTRAINT unique_email;
SELECT users.name, orders.amount
FROM users
INNER JOIN orders ON users.id = orders.user_id;
5.2 LEFT JOIN
SELECT users.name, orders.amount
FROM users
LEFT JOIN orders ON users.id = orders.user_id;
5.3 RIGHT JOIN
SELECT users.name, orders.amount
FROM users
RIGHT JOIN orders ON users.id = orders.user_id;
5.4 FULL OUTER JOIN
SELECT users.name, orders.amount
FROM users
FULL OUTER JOIN orders ON users.id = orders.user_id;
5.5 CROSS JOIN
SELECT users.name, products.name
FROM users
CROSS JOIN products;
5.6 SELF JOIN
SELECT a.name AS employee, b.name AS manager
FROM employees a
LEFT JOIN employees b ON a.manager_id = b.id;
5.7 Using Aliases in Joins
SELECT u.name, o.amount
FROM users u
JOIN orders o ON u.id = o.user_id;
5.8 Joining Multiple Tables
SELECT u.name, o.amount, p.name
FROM users u
JOIN orders o ON u.id = o.user_id
JOIN products p ON o.product_id = p.id;
5.9 NATURAL JOIN
SELECT * FROM orders NATURAL JOIN customers;
5.10 USING Clause
SELECT * FROM orders JOIN customers USING (customer_id);
SELECT COUNT(*) FROM orders;
6.2 COUNT Function
SELECT COUNT(DISTINCT user_id) FROM orders;
6.3 SUM Function
SELECT SUM(amount) FROM payments;
6.4 AVG Function
SELECT AVG(price) FROM products;
6.5 MIN and MAX Functions
SELECT MIN(age), MAX(age) FROM users;
6.6 GROUP BY Clause
SELECT country, COUNT(*) FROM users GROUP BY country;
6.7 HAVING Clause
SELECT country, COUNT(*) FROM users GROUP BY country HAVING COUNT(*) > 10;
6.8 Combining GROUP BY with ORDER BY
SELECT category, SUM(sales) FROM products GROUP BY category ORDER BY SUM(sales) DESC;
6.9 Using NULLs in Aggregations
SELECT COUNT(column) FROM table; -- counts non-null only
6.10 Scalar Functions
SELECT UPPER(name), LENGTH(name) FROM users;
SELECT name FROM users WHERE id IN (SELECT user_id FROM orders);
7.2 Correlated Subqueries
SELECT name FROM users u WHERE EXISTS (SELECT 1 FROM orders o WHERE o.user_id = u.id);
7.3 EXISTS Clause
SELECT * FROM products p WHERE EXISTS (SELECT 1 FROM sales s WHERE s.product_id = p.id);
7.4 ANY and ALL Operators
SELECT * FROM orders WHERE amount > ANY (SELECT amount FROM refunds);
7.5 UNION and UNION ALL
SELECT name FROM customers
UNION
SELECT name FROM suppliers;
7.6 INTERSECT and EXCEPT
SELECT id FROM users
INTERSECT
SELECT user_id FROM orders;
7.7 CASE Statements
SELECT name,
CASE WHEN age >= 18 THEN 'Adult' ELSE 'Minor' END AS age_group
FROM users;
7.8 COALESCE Function
SELECT name, COALESCE(phone, 'No phone') FROM users;
7.9 CAST and CONVERT
SELECT CAST(price AS DECIMAL(10,2)) FROM products;
7.10 Window Functions Intro
SELECT name, salary, RANK() OVER (ORDER BY salary DESC) FROM employees;
CREATE VIEW active_users AS
SELECT * FROM users WHERE active = TRUE;
8.2 Creating Views
CREATE VIEW recent_orders AS
SELECT * FROM orders WHERE order_date > CURRENT_DATE - INTERVAL '7 days';
8.3 Updating Views
CREATE OR REPLACE VIEW recent_orders AS
SELECT * FROM orders WHERE order_date > CURRENT_DATE - INTERVAL '30 days';
8.4 Dropping Views
DROP VIEW IF EXISTS old_users;
8.5 What are Stored Procedures?
-- Applies a multiplicative price increase to every product.
-- NOTE(review): `percent` is used as a fraction (0.05 = 5%) despite its
-- name — confirm callers pass fractions, not whole percentages.
-- NOTE(review): the UPDATE has no WHERE clause — this deliberately
-- touches the whole products table.
-- In the mysql client, wrap in DELIMITER // ... // as shown in 8.6.
CREATE PROCEDURE IncreasePrice(IN percent DECIMAL)
BEGIN
UPDATE products SET price = price * (1 + percent);
END;
8.6 Creating Stored Procedures
DELIMITER //
CREATE PROCEDURE GetUserOrders(IN userId INT)
BEGIN
SELECT * FROM orders WHERE user_id = userId;
END //
DELIMITER ;
8.7 Calling Stored Procedures
CALL GetUserOrders(1);
8.8 Parameters in Procedures
CREATE PROCEDURE GetCount(OUT total INT)
BEGIN
SELECT COUNT(*) INTO total FROM users;
END;
8.9 Stored Functions
-- Returns a 10% discount amount for the given price.
-- DETERMINISTIC is required when binary logging is enabled
-- (MySQL raises error 1418 otherwise); this function is a pure
-- computation, so the declaration is accurate.
CREATE FUNCTION GetDiscount(price DECIMAL) RETURNS DECIMAL
DETERMINISTIC
BEGIN
RETURN price * 0.1;
END;
8.10 Dropping Procedures & Functions
DROP PROCEDURE IF EXISTS IncreasePrice;
DROP FUNCTION IF EXISTS GetDiscount;
BEGIN;
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
UPDATE accounts SET balance = balance + 100 WHERE id = 2;
COMMIT;
9.2 ACID Properties
-- Ensured by DBMS internally
9.3 COMMIT and ROLLBACK
ROLLBACK; -- Undo changes if error occurs
9.4 Isolation Levels
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
9.5 Locking Mechanisms
SELECT * FROM accounts FOR UPDATE;
9.6 Deadlocks
-- Detect and resolve via timeout or retry
9.7 Savepoints
SAVEPOINT sp1;
ROLLBACK TO sp1;
9.8 Transaction Best Practices
-- Avoid user interaction inside transactions
9.9 Concurrent Access
-- Use proper isolation and locking
9.10 Monitoring Transactions
SELECT * FROM pg_stat_activity WHERE state = 'active';
CREATE INDEX idx_name ON users(name);
10.2 Types of Indexes
-- CREATE INDEX USING BTREE or HASH
10.3 Creating Indexes
CREATE UNIQUE INDEX idx_email ON users(email);
10.4 Dropping Indexes
DROP INDEX idx_name ON users;
10.5 EXPLAIN Query Plan
EXPLAIN SELECT * FROM orders WHERE amount > 100;
10.6 Optimizing SELECT Queries
SELECT * FROM users WHERE email = 'user@example.com';
10.7 Composite Indexes
CREATE INDEX idx_name_email ON users(name, email);
10.8 Indexing and JOINs
CREATE INDEX idx_user_id ON orders(user_id);
10.9 Query Caching
-- Depends on DBMS settings
10.10 Maintaining Indexes
ANALYZE TABLE users;
pg_dump mydb > backup.sql
11.2 Restoring Databases
psql mydb < backup.sql
11.3 User Management
CREATE USER readonly WITH PASSWORD 'secret';
11.4 Granting Permissions
GRANT SELECT ON users TO readonly;
11.5 Revoking Permissions
REVOKE INSERT ON orders FROM readonly;
11.6 Roles and Groups
CREATE ROLE admin;
GRANT admin TO alice;
11.7 Encryption Basics
-- Use SSL and data encryption functions
11.8 Auditing
-- Enable audit extensions or plugins
11.9 Security Best Practices
ALTER USER admin WITH PASSWORD 'newstrongpassword';
11.10 Monitoring Security
tail -f /var/log/postgresql/postgresql.log
EXPLAIN ANALYZE SELECT * FROM orders WHERE amount > 1000;
12.2 Index Optimization
CREATE INDEX idx_amount ON orders(amount);
12.3 Query Refactoring
SELECT id, name FROM users WHERE active = TRUE;
12.4 Avoiding SELECT *
SELECT name, email FROM users;
12.5 Use of LIMIT and OFFSET
SELECT * FROM products LIMIT 10 OFFSET 20;
12.6 Optimize Joins
CREATE INDEX idx_user_id ON orders(user_id);
12.7 Use Query Caching
-- Depends on DBMS caching
12.8 Partitioning Tables
CREATE TABLE sales_2023 PARTITION OF sales FOR VALUES FROM ('2023-01-01') TO ('2024-01-01');
12.9 Optimize Data Types
ALTER TABLE users ALTER COLUMN age TYPE SMALLINT;
12.10 Monitoring Tools
-- Use tools like pgAdmin or MySQL Workbench
CREATE TABLE users (id INT, name VARCHAR(100), birthdate DATE);
13.2 Numeric Types
CREATE TABLE products (price DECIMAL(10,2));
13.3 Character Types
CREATE TABLE articles (content TEXT);
13.4 Date & Time Types
CREATE TABLE events (event_date TIMESTAMP);
13.5 NULL vs NOT NULL
CREATE TABLE users (email VARCHAR(255) NOT NULL);
13.6 UNIQUE Constraint
ALTER TABLE users ADD CONSTRAINT unique_email UNIQUE(email);
13.7 PRIMARY KEY
CREATE TABLE orders (
id SERIAL PRIMARY KEY,
user_id INT
);
13.8 FOREIGN KEY
ALTER TABLE orders ADD CONSTRAINT fk_user FOREIGN KEY (user_id) REFERENCES users(id);
13.9 CHECK Constraints
ALTER TABLE users ADD CONSTRAINT chk_age CHECK (age >= 18);
13.10 Default Values
ALTER TABLE users ALTER COLUMN active SET DEFAULT TRUE;
pg_dump mydb > full_backup.sql
14.2 Backup Scheduling
-- cron job to run pg_dump daily
14.3 Restoring from Backups
psql mydb < full_backup.sql
14.4 Point-in-Time Recovery
-- configure WAL archiving
14.5 Backup Compression
pg_dump mydb | gzip > backup.sql.gz
14.6 Verifying Backups
gunzip -t backup.sql.gz
14.7 Backup Security
chmod 600 backup.sql.gz
14.8 Disaster Recovery Planning
-- Document recovery steps
14.9 Automated Recovery
bash restore.sh
14.10 Backup Best Practices
-- schedule regular test restores
WITH RECURSIVE cte AS (
SELECT id, parent_id FROM categories WHERE id = 1
UNION ALL
SELECT c.id, c.parent_id FROM categories c JOIN cte ON c.parent_id = cte.id
)
SELECT * FROM cte;
15.2 JSON Data Types
CREATE TABLE data (info JSON);
15.3 JSON Functions
SELECT info->'name' FROM data;
15.4 Full-Text Search
CREATE INDEX idx_fts ON documents USING gin(to_tsvector('english', content));
15.5 Common Table Expressions (CTE)
WITH cte AS (SELECT * FROM users WHERE active = TRUE) SELECT * FROM cte;
15.6 Window Functions Advanced
SELECT name, ROW_NUMBER() OVER (ORDER BY score DESC) FROM players;
15.7 Pivot Tables
-- Use CASE with GROUP BY for pivot
15.8 Dynamic SQL
PREPARE stmt FROM 'SELECT * FROM users WHERE age > ?';
EXECUTE stmt USING @age;
15.9 Recursive Triggers
-- Be careful to avoid infinite loops
15.10 SQL Extensions
-- PostgreSQL, MySQL, Oracle extensions
CREATE USER john WITH PASSWORD 'strongPass123';
16.2 Roles and Privileges
CREATE ROLE manager;
GRANT SELECT, UPDATE ON employees TO manager;
GRANT manager TO john;
16.3 Granting Permissions
GRANT INSERT ON orders TO john;
16.4 Revoking Permissions
REVOKE UPDATE ON employees FROM john;
16.5 Encryption Techniques
-- Enable SSL connections for your DB
16.6 Auditing Database Activity
-- Use audit extensions or triggers
16.7 Data Masking
SELECT name, '****' AS ssn FROM employees;
16.8 Compliance Standards
-- Enforce access policies per regulation
16.9 Secure Backup Practices
gpg --encrypt --recipient admin backup.sql
16.10 Incident Response
-- Document and follow breach protocols
SET log_statement = 'all';
17.2 Using EXPLAIN
EXPLAIN SELECT * FROM orders WHERE status = 'shipped';
17.3 Using EXPLAIN ANALYZE
EXPLAIN ANALYZE SELECT * FROM orders WHERE status = 'shipped';
17.4 Slow Query Logging
SET log_min_duration_statement = 1000; -- ms
17.5 Monitoring Active Sessions
SELECT * FROM pg_stat_activity WHERE state = 'active';
17.6 Analyzing Locks
SELECT * FROM pg_locks WHERE granted = false;
17.7 Using Performance Views
SELECT * FROM pg_stat_user_tables;
17.8 Log Rotation & Management
-- Configure log rotation in postgresql.conf
17.9 Alerting on Errors
-- Use monitoring tools like Nagios
17.10 Query Performance Tuning
-- Adjust indexes and rewrite queries
-- Collect data from multiple sources
18.2 Star Schema Design
CREATE TABLE sales_fact (...);
CREATE TABLE product_dim (...);
18.3 Snowflake Schema
-- Dimensions linked to sub-dimensions
18.4 ETL Basics
-- Use tools like Talend or custom scripts
18.5 Extract Phase
SELECT * FROM source_db.table;
18.6 Transform Phase
UPDATE staging SET col = TRIM(col);
18.7 Load Phase
INSERT INTO warehouse.table SELECT * FROM staging;
18.8 Incremental Loading
-- Example filter: WHERE last_modified > last_load_date
18.9 Data Warehouse Optimization
CREATE INDEX idx_date ON sales_fact(sale_date);
18.10 Data Warehouse Maintenance
DELETE FROM sales_fact WHERE sale_date < '2020-01-01';
-- Examples: MongoDB, Cassandra
19.2 Differences Between SQL & NoSQL
-- SQL: structured, ACID; NoSQL: flexible, BASE
19.3 NewSQL Overview
-- Examples: Google Spanner, CockroachDB
19.4 Hybrid Architectures
-- Polyglot persistence
19.5 Integrating NoSQL Data
-- Use connectors or foreign data wrappers
19.6 JSON Support in SQL
SELECT info->'key' FROM table WHERE info->>'status' = 'active';
19.7 Using Foreign Data Wrappers
CREATE FOREIGN TABLE nosql_table (...) SERVER nosql_server;
19.8 Data Sync Between SQL & NoSQL
-- Use CDC or messaging queues
19.9 Transactions in NewSQL
BEGIN TRANSACTION;
-- Operations
COMMIT;
19.10 Choosing the Right DB
-- Analyze workload and requirements
-- Examples: AWS RDS, Azure SQL Database
20.2 Setting Up Cloud Instances
-- Use cloud portals or CLI
20.3 Connecting to Cloud DBs
psql -h cloud-db.example.com -U user -d dbname
20.4 Scaling Cloud Databases
-- Increase instance size or add read replicas
20.5 Cloud Backup & Recovery
-- Use cloud snapshot features
20.6 Security in Cloud Databases
-- Use VPC, SSL, and IAM roles
20.7 Monitoring Cloud DB Performance
-- Use CloudWatch, Azure Monitor
20.8 Serverless SQL Databases
-- Examples: Amazon Aurora Serverless
20.9 Multi-region Deployments
-- Configure cross-region replicas
20.10 Migrating to Cloud
-- Use migration services/tools
-- save queries in .sql files and run
psql -f script.sql
21.2 Shell Scripting with SQL
#!/bin/bash
psql -c "SELECT COUNT(*) FROM users;"
21.3 Scheduled Jobs
-- Use pgAgent or cron jobs
21.4 Dynamic SQL
EXECUTE 'SELECT * FROM ' || table_name;
21.5 Error Handling in Scripts
BEGIN
-- statements
EXCEPTION WHEN OTHERS THEN
-- error handling
END;
21.6 Using Variables in Scripts
DECLARE v_count INT;
SELECT COUNT(*) INTO v_count FROM users;
21.7 Logging in Scripts
RAISE NOTICE 'Processed % rows', v_count;
21.8 Automation Best Practices
-- Use version control for scripts
21.9 Integrating with Other Tools
import psycopg2
21.10 Scheduling with Cloud Services
-- AWS Lambda + CloudWatch Events
SELECT category, SUM(sales) FROM sales GROUP BY category;
22.2 Aggregate Functions
SELECT AVG(price) FROM products;
22.3 Window Functions
SELECT name, SUM(sales) OVER (PARTITION BY region) FROM sales_data;
22.4 CUBE and ROLLUP
SELECT region, product, SUM(sales)
FROM sales
GROUP BY CUBE(region, product);
22.5 Pivot Tables
-- Use CASE WHEN inside aggregate
22.6 Common Table Expressions
WITH recent_sales AS (
SELECT * FROM sales WHERE sale_date > CURRENT_DATE - INTERVAL '30 days'
) SELECT * FROM recent_sales;
22.7 Advanced Grouping Sets
GROUP BY GROUPING SETS ((region), (product), ());
22.8 Generating Reports
COPY (SELECT * FROM sales) TO '/tmp/sales.csv' CSV HEADER;
22.9 Data Visualization Integration
-- Use BI tools like Tableau or PowerBI
22.10 Performance Tips for Analytics
CREATE INDEX idx_sale_date ON sales(sale_date);
SELECT * FROM orders o INNER JOIN customers c ON o.customer_id = c.id;
23.2 Left Join
SELECT * FROM customers c LEFT JOIN orders o ON c.id = o.customer_id;
23.3 Right Join
SELECT * FROM orders o RIGHT JOIN customers c ON o.customer_id = c.id;
23.4 Full Outer Join
SELECT * FROM customers c FULL OUTER JOIN orders o ON c.id = o.customer_id;
23.5 Cross Join
SELECT * FROM colors CROSS JOIN sizes;
23.6 Self Join
SELECT e1.name, e2.name AS manager FROM employees e1 JOIN employees e2 ON e1.manager_id = e2.id;
23.7 Using UNION
SELECT city FROM customers
UNION
SELECT city FROM suppliers;
23.8 UNION ALL
SELECT city FROM customers
UNION ALL
SELECT city FROM suppliers;
23.9 INTERSECT
SELECT city FROM customers
INTERSECT
SELECT city FROM suppliers;
23.10 EXCEPT
SELECT city FROM customers
EXCEPT
SELECT city FROM suppliers;
CREATE TRIGGER update_timestamp BEFORE UPDATE ON users FOR EACH ROW EXECUTE FUNCTION update_modified_column();
24.2 BEFORE & AFTER Triggers
-- BEFORE or AFTER INSERT/UPDATE/DELETE
24.3 Row-level vs Statement-level
-- FOR EACH ROW or FOR EACH STATEMENT
24.4 Creating Trigger Functions
CREATE FUNCTION update_modified_column() RETURNS trigger AS $$
BEGIN
NEW.modified = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
24.5 Dropping Triggers
DROP TRIGGER update_timestamp ON users;
24.6 Event Scheduling
-- MySQL EVENT syntax
CREATE EVENT cleanup
ON SCHEDULE EVERY 1 DAY
DO DELETE FROM logs WHERE created < NOW() - INTERVAL 30 DAY;
24.7 Use Cases for Triggers
-- Log changes or enforce business rules
24.8 Recursive Triggers
-- Be careful to avoid infinite loops
24.9 Performance Impact
-- Use sparingly and optimize logic
24.10 Best Practices
-- Document trigger behavior clearly
-- pg_dump vs filesystem backup
25.2 Point-in-Time Recovery (PITR)
-- Use WAL archiving and recovery.conf
25.3 Backup Compression & Encryption
pg_dump db | gzip | gpg -c > backup.gz.gpg
25.4 Incremental Backups
-- Supported by some DBMS
25.5 Backup Verification
pg_restore --list backup.dump
25.6 Automating Backups
-- cron job or scheduler
25.7 Restoring to Different Servers
pg_restore -h otherhost -d newdb backup.dump
25.8 Disaster Recovery Planning
-- Document and test recovery procedures
25.9 Backup Best Practices
-- Rotate backups and test restores
25.10 Troubleshooting Backups
-- Check disk space, permissions
-- Document schema and dependencies
26.2 Schema Migration
ALTER TABLE users ADD COLUMN phone VARCHAR(20);
26.3 Data Migration
INSERT INTO new_table SELECT * FROM old_table;
26.4 Application Migration
-- Change connection strings
26.5 Rolling Upgrades
-- Use replicas and failover
26.6 Downtime Planning
-- Notify users and backup DB
26.7 Testing Migration
-- Use staging environment
26.8 Rollback Strategies
-- Backup before migration
26.9 Upgrade Tools
-- pg_upgrade or similar
26.10 Post-Migration Tasks
VACUUM ANALYZE;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
27.2 Popular Extensions
CREATE EXTENSION postgis;
27.3 Installing Extensions
CREATE EXTENSION hstore;
27.4 Using Extensions
SELECT uuid_generate_v4();
27.5 Writing Custom Extensions
-- Write in C or PL languages
27.6 Managing Extensions
DROP EXTENSION hstore;
27.7 Extension Compatibility
-- Verify before upgrading DB
27.8 Security Considerations
-- Only trusted extensions
27.9 Extension Performance Impact
-- Test before production use
27.10 Extension Best Practices
-- Track installed extensions
CREATE TABLE config (data JSONB);
28.2 Querying JSON
SELECT data->'name' FROM config WHERE id = 1;
28.3 Updating JSON Fields
UPDATE config SET data = jsonb_set(data, '{age}', '30') WHERE id = 1;
28.4 Storing XML Data
CREATE TABLE xml_data (content XML);
28.5 Querying XML
SELECT xpath('/person/name/text()', content) FROM xml_data;
28.6 Validating JSON/XML
-- Use constraints or external validation
28.7 Indexing JSON/XML
CREATE INDEX idx_json ON config USING gin(data);
28.8 Converting JSON & XML
-- NOTE(review): PostgreSQL has no jsonb_to_xml() function; use the
-- built-in query_to_xml() to render query results (including JSONB
-- columns) as XML. Args: query text, nulls, tableforest, target namespace.
SELECT query_to_xml('SELECT data FROM config', true, false, '');
28.9 Performance Considerations
-- Use JSONB for better performance
28.10 Use Cases
-- Store semi-structured data efficiently
-- Use pgAdmin, Zabbix, Prometheus
29.2 Setting Threshold Alerts
-- Alert if CPU > 80% or disk < 10%
29.3 Monitoring Query Performance
-- Top 10 statements by cumulative execution time.
-- PostgreSQL 13 renamed total_time to total_exec_time; use total_time
-- only on PG 12 and earlier.
SELECT * FROM pg_stat_statements ORDER BY total_exec_time DESC LIMIT 10;
29.4 Monitoring Locks & Deadlocks
SELECT * FROM pg_locks WHERE NOT granted;
29.5 Alerting on Failures
-- Use email or Slack alerts
29.6 Monitoring Replication Lag
-- Replication lag in bytes on a standby. pg_wal_lsn_diff() works on all
-- supported versions; the bare "-" operator on pg_lsn values exists only
-- in PostgreSQL 14+.
SELECT pg_wal_lsn_diff(pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn());
29.7 Monitoring Disk Usage
SELECT pg_size_pretty(pg_database_size('mydb'));
29.8 Log Analysis Automation
-- Use ELK stack or similar tools
29.9 Custom Metrics
-- Use extensions or custom queries
29.10 Continuous Improvement
-- Adjust queries and configs regularly
-- Examples: Google Spanner, Azure Cosmos DB
30.2 AI & Machine Learning Integration
-- Use ML models inside DBMS or via extensions
30.3 Blockchain & SQL
-- Hybrid blockchain-SQL solutions
30.4 Graph Databases
-- Use extensions like pgGraph
30.5 Multi-Model Databases
-- Examples: ArangoDB, Cosmos DB
30.6 Edge Computing & Databases
-- Lightweight DBs on edge devices
30.7 Automation & Self-tuning
-- Auto indexing and query optimization
30.8 Serverless Databases
-- Examples: AWS Aurora Serverless
30.9 Quantum Computing & Databases
-- Research ongoing
30.10 Preparing for Change
-- Continuous learning and adaptation
-- Avoid by using prepared statements
PREPARE stmt AS SELECT * FROM users WHERE username = $1;
EXECUTE stmt('admin');
31.2 User Authentication
CREATE USER app_user WITH PASSWORD 'securePass123';
31.3 Role-Based Access Control
CREATE ROLE read_only;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO read_only;
31.4 Granting & Revoking Permissions
GRANT INSERT, UPDATE ON orders TO app_user;
REVOKE DELETE ON orders FROM app_user;
31.5 Using Views for Security
CREATE VIEW public_order_view AS SELECT id, total FROM orders;
GRANT SELECT ON public_order_view TO read_only;
31.6 Encrypting Data at Rest
-- Use Transparent Data Encryption (TDE) features in DBMS
31.7 Encrypting Data in Transit
-- Enable SSL connections in PostgreSQL config
31.8 Auditing & Logging
-- Enable logging in postgresql.conf:
log_statement = 'all'
31.9 SQL Security Best Practices
-- Use least privilege principle
31.10 Incident Response
-- Review logs and revoke compromised credentials
EXPLAIN ANALYZE SELECT * FROM orders WHERE customer_id = 1;
32.2 Using Indexes Effectively
CREATE INDEX idx_customer ON orders(customer_id);
32.3 Avoiding Full Table Scans
-- Use indexed columns in WHERE clauses
SELECT * FROM orders WHERE customer_id = 5;
32.4 Analyzing Slow Queries
-- Top 5 statements by cumulative execution time.
-- PostgreSQL 13 renamed total_time to total_exec_time; use total_time
-- only on PG 12 and earlier.
SELECT * FROM pg_stat_statements ORDER BY total_exec_time DESC LIMIT 5;
32.5 Optimizing JOIN Operations
EXPLAIN SELECT * FROM orders o JOIN customers c ON o.customer_id = c.id;
32.6 Using LIMIT & OFFSET
SELECT * FROM orders ORDER BY order_date DESC LIMIT 10 OFFSET 20;
32.7 Query Caching
-- Use materialized views or cache layers
CREATE MATERIALIZED VIEW recent_orders AS SELECT * FROM orders WHERE order_date > NOW() - INTERVAL '1 day';
32.8 Database Statistics & VACUUM
VACUUM ANALYZE;
32.9 Connection Pooling
-- Use PgBouncer or similar tools
32.10 Monitoring Performance
SELECT * FROM pg_stat_activity;
CREATE INDEX idx_name ON customers(name);
33.2 Types of Indexes
CREATE INDEX idx_gin ON documents USING gin(to_tsvector('english', content));
33.3 When to Use Indexes
CREATE INDEX idx_order_date ON orders(order_date);
33.4 Composite Indexes
CREATE INDEX idx_customer_date ON orders(customer_id, order_date);
33.5 Unique Indexes
CREATE UNIQUE INDEX idx_email ON users(email);
33.6 Partial Indexes
CREATE INDEX idx_active_users ON users(email) WHERE active = true;
33.7 Index Maintenance
REINDEX INDEX idx_customer_date;
33.8 Downsides of Indexes
-- Avoid excessive indexes on write-heavy tables
33.9 Using EXPLAIN with Indexes
EXPLAIN SELECT * FROM orders WHERE customer_id = 10;
33.10 Indexing Best Practices
-- Monitor and tune regularly
-- Inserts one order row for a customer.
-- The parameters are qualified with the procedure name inside the SQL
-- statement so they can never be confused with the identically-named
-- table columns (PL/pgSQL name-resolution pitfall).
CREATE PROCEDURE add_order(customer_id INT, amount DECIMAL) AS $$
BEGIN
INSERT INTO orders(customer_id, total)
VALUES (add_order.customer_id, add_order.amount);
END;
$$ LANGUAGE plpgsql;
34.2 Creating Functions
-- Counts the orders belonging to one customer.
-- BUG FIX: the original predicate "customer_id = customer_id" pitted the
-- parameter against the identically-named column — PL/pgSQL either raises
-- an ambiguity error or, with variable substitution disabled, the
-- comparison is a tautology that counts every order. Qualifying both
-- sides makes the intent explicit: table column vs. function parameter.
CREATE FUNCTION get_total_orders(customer_id INT) RETURNS INT AS $$
BEGIN
RETURN (SELECT COUNT(*)
        FROM orders o
        WHERE o.customer_id = get_total_orders.customer_id);
END;
$$ LANGUAGE plpgsql;
34.3 Procedure vs Function
-- CALL procedure; SELECT function();
34.4 Parameters & Variables
DECLARE total_orders INT;
SELECT COUNT(*) INTO total_orders FROM orders WHERE customer_id = 1;
34.5 Control Structures
IF total_orders > 10 THEN
RAISE NOTICE 'High order count';
END IF;
34.6 Error Handling
BEGIN
-- statements
EXCEPTION WHEN others THEN
RAISE WARNING 'Error occurred';
END;
34.7 Returning Result Sets
-- Returns (id, total) for every order.
-- BUG FIX: RETURNS TABLE(id, total) declares OUT variables named id and
-- total, so the unqualified "SELECT id, total FROM orders" is ambiguous
-- and PostgreSQL rejects it at runtime. Alias the table and qualify the
-- columns to resolve the conflict.
CREATE FUNCTION get_orders() RETURNS TABLE(id INT, total DECIMAL) AS $$
BEGIN
RETURN QUERY SELECT o.id, o.total FROM orders o;
END;
$$ LANGUAGE plpgsql;
34.8 Debugging Procedures
RAISE NOTICE 'Value: %', variable;
34.9 Performance Considerations
-- Keep procedures efficient
34.10 Best Practices
-- Comment and version control
BEGIN;
-- multiple statements
COMMIT;
35.2 ACID Properties
-- Guarantees of transactions
35.3 Isolation Levels
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
35.4 Locking Mechanisms
SELECT * FROM orders FOR UPDATE;
35.5 Deadlocks
-- Detect and resolve deadlocks
35.6 Handling Rollbacks
ROLLBACK;
35.7 Savepoints
SAVEPOINT sp1;
-- statements
ROLLBACK TO sp1;
35.8 Optimistic vs Pessimistic Concurrency
-- Choose strategy based on workload
35.9 Monitoring Locks
SELECT * FROM pg_locks WHERE NOT granted;
35.10 Best Practices
-- Avoid user interaction inside transactions
-- Large scale DB optimized for reads
36.2 Star Schema
CREATE TABLE sales_fact (...);
CREATE TABLE dim_date (...);
36.3 Snowflake Schema
-- More complex schema normalization
36.4 ETL Processes
-- Use tools like Apache NiFi or custom scripts
36.5 OLAP vs OLTP
-- OLAP for complex queries, OLTP for transactions
36.6 Partitioning Tables
-- A partitioned table needs a column list before the PARTITION BY clause;
-- the original statement was syntactically invalid. The partition key
-- column (order_date) must be part of the table definition.
CREATE TABLE sales (
    id BIGSERIAL,
    order_date DATE NOT NULL,
    total NUMERIC(10,2)
) PARTITION BY RANGE (order_date);
36.7 Materialized Views in Warehousing
CREATE MATERIALIZED VIEW monthly_sales AS SELECT date_trunc('month', order_date), SUM(total) FROM sales GROUP BY 1;
36.8 Data Warehouse Performance
-- Use columnar storage for analytics
36.9 Data Governance
-- Audit trails and data validation
36.10 Future of Data Warehousing
-- Use Snowflake, BigQuery, Redshift
-- Use Hadoop, Spark alongside SQL
37.2 SQL on Hadoop
SELECT * FROM hive_table WHERE date = '2024-01-01';
37.3 Using Spark SQL
spark.sql("SELECT * FROM sales WHERE region = 'NA'");
37.4 Data Lakes & SQL
-- Query Parquet or ORC files with SQL engines
37.5 Streaming Data & SQL
-- Use Apache Kafka + ksqlDB
37.6 Integration Challenges
-- Handle schema changes carefully
37.7 Cloud Big Data Services
-- AWS Athena, Google BigQuery
37.8 Security in Big Data
-- Use encryption and IAM roles
37.9 Query Optimization on Big Data
-- Use partition pruning and predicate pushdown
37.10 Future Trends
-- Automated query tuning and data cataloging
-- pg_dump for a full backup
-- pg_dump -F c for compressed (custom-format) backups
38.2 Backup Scheduling
0 2 * * * pg_dump mydb > /backups/mydb_$(date +\%F).sql
38.3 Backup Verification
pg_restore --list backup_file
38.4 Restoring Databases
pg_restore -d newdb backup_file
38.5 Point-in-Time Recovery
-- Use WAL logs and recovery.conf
38.6 Backup Encryption
pg_dump mydb | gzip | gpg -c > backup.gz.gpg
38.7 Cloud Backup Solutions
-- AWS S3, Azure Blob Storage
38.8 Disaster Recovery Planning
-- Document recovery steps and test
38.9 Backup Automation Tools
-- pgBackRest, Barman
38.10 Backup Best Practices
-- Rotate backups regularly
-- Master-slave replication example
39.2 Synchronous vs Asynchronous
-- Synchronous waits for confirmation, async does not
39.3 Setting up Streaming Replication
-- Configure primary and standby servers
39.4 Logical Replication
CREATE PUBLICATION my_pub FOR TABLE orders;
CREATE SUBSCRIPTION my_sub CONNECTION 'conninfo' PUBLICATION my_pub;
39.5 Replication Monitoring
SELECT * FROM pg_stat_replication;
39.6 Failover & Switchover
-- Use pg_ctl promote
39.7 Replication Lag Causes
-- Monitor with pg_stat_replication
39.8 Conflict Resolution
-- Use application logic or conflict handlers
39.9 Replication Security
-- Use SSL and firewall rules
39.10 Replication Best Practices
-- Monitor and document setup
-- Use clustering and failover solutions
40.2 Clustering Basics
-- Use Patroni, Pacemaker
40.3 Load Balancing
-- Use Pgpool-II or HAProxy
40.4 Automatic Failover
-- Configure tools like repmgr or Patroni
40.5 Data Consistency
-- Use synchronous replication
40.6 Monitoring HA Systems
-- Use Prometheus, Grafana
40.7 Backup in HA Environments
-- Backup from standby to reduce load
40.8 Scaling Clusters
-- Use horizontal scaling methods
40.9 Security in HA Clusters
-- Use encrypted connections and authentication
40.10 HA Best Practices
-- Document and rehearse disaster recovery
-- NOTE(review): only ssn is masked here (replaced by a literal); phone is
-- returned in the clear — mask or drop it too if it is sensitive data.
SELECT phone, '****-****' AS masked_ssn FROM users;
41.2 Row Level Security
-- Row-level security: each user sees only their own orders.
-- NOTE(review): current_user_id() is not a PostgreSQL built-in — it must
-- be a user-defined helper mapping the session to an application user id
-- (e.g. reading a session setting); confirm it exists before applying.
CREATE POLICY user_policy ON orders FOR SELECT USING (user_id = current_user_id());
41.3 Transparent Data Encryption
-- Enable TDE feature in DBMS
41.4 Auditing with Triggers
CREATE TRIGGER audit_order AFTER INSERT OR UPDATE ON orders FOR EACH ROW EXECUTE PROCEDURE audit_function();
41.5 SQL Injection Prevention Techniques
PREPARE stmt AS SELECT * FROM users WHERE email = $1;
EXECUTE stmt('user@example.com');
41.6 Secure Coding Practices
-- Use input sanitization functions
41.7 Encryption Key Management
-- Use KMS or hardware security modules
41.8 Compliance Standards
-- Document compliance efforts
41.9 Incident Detection & Response
-- Use alerts on unusual queries
41.10 Security Automation
-- Use scripts and tools for regular checks
SELECT customer_id, order_date, SUM(total) OVER (PARTITION BY customer_id) FROM orders;
42.2 Common Table Expressions (CTEs)
WITH recent_orders AS (SELECT * FROM orders WHERE order_date > NOW() - INTERVAL '30 days')
SELECT * FROM recent_orders;
42.3 Recursive Queries
WITH RECURSIVE subordinates AS (
SELECT id, manager_id FROM employees WHERE id = 1
UNION
SELECT e.id, e.manager_id FROM employees e JOIN subordinates s ON e.manager_id = s.id
)
SELECT * FROM subordinates;
42.4 Pivoting Data
-- Use CASE statements for pivot
SELECT
product,
SUM(CASE WHEN month = 'Jan' THEN sales ELSE 0 END) AS Jan,
SUM(CASE WHEN month = 'Feb' THEN sales ELSE 0 END) AS Feb
FROM sales_data GROUP BY product;
42.5 Lateral Joins
SELECT c.name, o.total FROM customers c CROSS JOIN LATERAL (
SELECT total FROM orders WHERE customer_id = c.id ORDER BY order_date DESC LIMIT 1
) o;
42.6 JSON & XML Querying
SELECT data->'name' FROM json_table WHERE id = 1;
42.7 Full Text Search
SELECT * FROM documents WHERE to_tsvector(content) @@ to_tsquery('database');
42.8 Dynamic SQL
EXECUTE 'SELECT * FROM ' || tablename;
42.9 Error Handling in Queries
BEGIN
-- statements
EXCEPTION WHEN others THEN
-- handle error
END;
42.10 Query Optimization Tips
-- Avoid SELECT * and functions in WHERE clauses
CREATE TABLE events (event_time TIMESTAMP);
43.2 Time Zones in SQL
SELECT CURRENT_TIMESTAMP AT TIME ZONE 'UTC';
43.3 Interval Data Types
SELECT NOW() + INTERVAL '1 day';
43.4 Temporal Queries
SELECT * FROM orders WHERE order_date > NOW() - INTERVAL '7 days';
43.5 Valid Time vs Transaction Time
-- Track data validity and changes
43.6 Temporal Tables
-- Use triggers or system-versioned tables
43.7 Working with Timestamps
SELECT TO_CHAR(order_date, 'YYYY-MM-DD HH24:MI:SS') FROM orders;
43.8 Temporal Aggregations
SELECT DATE_TRUNC('month', order_date), COUNT(*) FROM orders GROUP BY 1;
43.9 Performance Considerations
CREATE INDEX idx_order_date ON orders(order_date);
43.10 Use Cases
-- Use temporal data for compliance
CREATE TABLE locations (geom geometry(Point, 4326));
44.2 Using PostGIS
CREATE EXTENSION postgis;
44.3 Spatial Queries
SELECT ST_Distance(geom1, geom2) FROM locations;
44.4 Indexing Geospatial Data
CREATE INDEX idx_locations_geom ON locations USING gist(geom);
44.5 Importing Spatial Data
shp2pgsql -I myshape.shp locations | psql mydb
44.6 Visualizing Data
-- Use QGIS or similar
44.7 Geospatial Functions
SELECT ST_Buffer(geom, 100) FROM locations;
44.8 Geospatial Data Applications
-- Analyze spatial relationships
44.9 Performance Tips
-- Verify the spatial (GiST) index is used for the bounding-box filter.
-- ST_MakeEnvelope(xmin, ymin, xmax, ymax, srid) builds the search window;
-- the original "..." placeholder was not executable SQL.
EXPLAIN ANALYZE SELECT * FROM locations WHERE ST_Intersects(geom, ST_MakeEnvelope(-74.1, 40.6, -73.7, 40.9, 4326));
44.10 Future of Geospatial SQL
-- Integration with IoT and AI
-- Relational vs document/key-value stores
45.2 Using JSON in SQL
CREATE TABLE users (profile JSONB);
45.3 Foreign Data Wrappers (FDW)
CREATE EXTENSION mongo_fdw;
45.4 Querying NoSQL via SQL
SELECT * FROM mongo_collection;
45.5 Syncing SQL & NoSQL
-- Use CDC tools or custom scripts
45.6 Data Modeling Differences
-- Normalize SQL; denormalize NoSQL
45.7 Use Cases for Integration
-- Use SQL for transactions, NoSQL for flexibility
45.8 Challenges in Integration
-- Plan schema mapping carefully
45.9 Tools & Connectors
-- Debezium, Kafka Connect
45.10 Future Trends
-- Emerging multi-model systems like ArangoDB
-- Plan entities and relationships
46.2 Normalization Forms
-- Eliminate redundancy and dependencies
46.3 Denormalization
-- Duplicate data for faster queries
46.4 Entity-Relationship Diagrams (ERDs)
-- Use tools like draw.io or ERDPlus
46.5 Handling Many-to-Many Relationships
-- Junction (bridge) table resolving the many-to-many relationship between
-- orders and products. The composite primary key prevents duplicate pairs;
-- named foreign keys make the relationship explicit and keep errors greppable.
CREATE TABLE order_products (
order_id INT NOT NULL,
product_id INT NOT NULL,
PRIMARY KEY(order_id, product_id),
CONSTRAINT order_products_order_id_fk FOREIGN KEY (order_id) REFERENCES orders(id),
CONSTRAINT order_products_product_id_fk FOREIGN KEY (product_id) REFERENCES products(id)
);
46.6 Naming Conventions
-- Use singular table names, snake_case columns
46.7 Using Surrogate Keys
CREATE TABLE users (
id SERIAL PRIMARY KEY,
name TEXT
);
46.8 Handling Nulls
-- Use NOT NULL where possible
46.9 Documentation & Versioning
-- Use tools like Liquibase or Flyway
46.10 Data Modeling Tools
-- ER/Studio, MySQL Workbench
EXPLAIN ANALYZE SELECT * FROM orders WHERE customer_id = 10;
47.2 pg_stat_statements
-- Top 5 statements by cumulative execution time.
-- PostgreSQL 13 renamed total_time to total_exec_time; on PG <= 12 use
-- ORDER BY total_time instead. Selecting named columns avoids SELECT *.
SELECT query, calls, total_exec_time FROM pg_stat_statements ORDER BY total_exec_time DESC LIMIT 5;
47.3 Auto_explain Module
-- Enable auto_explain in postgresql.conf
47.4 Using Index Advisors
-- Use third-party tools like pganalyze
47.5 Query Profilers
-- Use tools like pgBadger
47.6 Performance Dashboards
-- Grafana dashboards connected to Prometheus
47.7 Logging & Monitoring
-- Configure logging settings
47.8 Load Testing
-- Use tools like pgbench
47.9 Query Rewriting
-- Replace correlated subqueries with joins
47.10 Best Practices
-- Analyze and tune queries periodically
-- Use pgBackRest for incremental backups
48.2 Logical vs Physical Backups
-- pg_dump (logical), pg_basebackup (physical)
48.3 Continuous Archiving
archive_command = 'cp %p /mnt/server/archivedir/%f'
48.4 PITR Setup
-- Setup recovery.conf for PITR
48.5 Backup Testing
-- Restore backups regularly to test
48.6 Disaster Recovery Plans
-- Include backup and failover steps
48.7 Automating Backups
-- Schedule pg_dump scripts
48.8 Backup Compression
pg_dump mydb | gzip > backup.sql.gz
48.9 Backup Encryption
gpg -c backup.sql.gz
48.10 Monitoring Backup Jobs
-- Use monitoring tools or scripts
SELECT customer_id, SUM(total) FROM orders GROUP BY customer_id;
49.2 Grouping Sets
SELECT product, region, SUM(sales) FROM sales_data GROUP BY GROUPING SETS ((product), (region));
49.3 Rollup & Cube
SELECT product, region, SUM(sales) FROM sales_data GROUP BY ROLLUP(product, region);
49.4 Window Aggregates
SELECT order_date, SUM(total) OVER (ORDER BY order_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM orders;
49.5 Data Visualization Tools
-- Use Tableau, Power BI, or Metabase
49.6 Exporting Reports
COPY (SELECT * FROM sales) TO '/tmp/sales.csv' CSV HEADER;
49.7 Scheduled Reporting
-- Use cron jobs or DB scheduler
49.8 SQL for Data Science
-- Clean and aggregate data for ML
49.9 Handling Large Datasets
-- Use partitions and indexes
49.10 Reporting Best Practices
-- Validate data and format reports properly
-- Some DBs use ML for query tuning
50.2 Blockchain & SQL
-- Use SQL to query blockchain data
50.3 Graph SQL Extensions
-- Use extensions like Apache AGE
50.4 Cloud-Native SQL Databases
-- Use Google Spanner, AWS Aurora
50.5 Multi-Model Databases
-- Examples: ArangoDB, Cosmos DB
50.6 Serverless SQL
-- Use services like AWS Athena
50.7 Edge SQL Databases
-- Lightweight DBs for IoT
50.8 Quantum Computing & SQL
-- Research stage integration
50.9 Data Privacy Enhancements
-- Anonymize data during queries
50.10 Future Trends Summary
-- Stay updated with DB tech evolution
-- Combine data from multiple sources for analysis
51.2 ETL Processes
-- Use tools like Apache NiFi or SSIS for ETL
51.3 Star Schema Design
CREATE TABLE sales_fact (id INT, product_id INT, date_id INT, amount DECIMAL);
CREATE TABLE product_dim (product_id INT PRIMARY KEY, name TEXT);
51.4 Snowflake Schema
-- Dimension tables are normalized into multiple related tables
51.5 Data Partitioning
-- A declaratively partitioned table still needs its column list;
-- "CREATE TABLE sales PARTITION BY ..." alone is invalid syntax.
-- The partition key column must be NOT NULL for range partitioning to be useful.
CREATE TABLE sales (
id INT,
sale_date DATE NOT NULL,
amount DECIMAL(10,2)
) PARTITION BY RANGE (sale_date);
51.6 Materialized Views
-- Precomputed monthly totals; refresh with REFRESH MATERIALIZED VIEW monthly_sales.
-- NOTE(review): grouping by the output alias "month" is a PostgreSQL extension;
-- portable SQL repeats the DATE_TRUNC expression in GROUP BY.
CREATE MATERIALIZED VIEW monthly_sales AS
SELECT DATE_TRUNC('month', sale_date) AS month, SUM(amount) FROM sales GROUP BY month;
51.7 OLAP vs OLTP
-- OLAP optimized for queries and reporting
51.8 Data Warehouse Tools
-- Examples: Amazon Redshift, Snowflake, Google BigQuery
51.9 Slowly Changing Dimensions
-- Use type 2 SCD with history tables
51.10 Performance Tuning
CREATE INDEX idx_sales_date ON sales(sale_date);
-- Cloud providers: AWS RDS, Azure SQL Database
52.2 Benefits of Cloud SQL
-- Automatic backups and failover
52.3 Connecting to Cloud DBs
psql "host=mycloudhost dbname=mydb user=myuser password=mypass sslmode=require"
52.4 Cloud Security Practices
-- Enable SSL connections and least privilege access
52.5 Backup & Restore in Cloud
-- Use cloud console or CLI tools to create backups
52.6 Scaling Cloud Databases
-- Increase instance size or add read replicas
52.7 Serverless SQL Options
-- AWS Athena, Google BigQuery
52.8 Monitoring & Alerts
-- Use CloudWatch or Azure Monitor
52.9 Cost Management
-- Use reserved instances or auto pause features
52.10 Future of Cloud SQL
-- Growing use of AI and serverless architectures
-- Stored procedure (PostgreSQL 11+): decrements stock for one product.
-- NOTE(review): product id 101 is hard-coded; a parameter would generalize this.
-- Invoked with CALL update_stock(); (see 53.4 below).
CREATE PROCEDURE update_stock()
LANGUAGE plpgsql AS $$
BEGIN
UPDATE products SET stock = stock - 1 WHERE id = 101;
END;
$$;
53.2 Creating Functions
CREATE FUNCTION get_total_sales() RETURNS INT AS $$
DECLARE total INT;
BEGIN
SELECT SUM(amount) INTO total FROM sales;
RETURN total;
END;
$$ LANGUAGE plpgsql;
53.3 Procedure Parameters
CREATE PROCEDURE log_event(event_text TEXT) AS $$
BEGIN
INSERT INTO logs(description) VALUES (event_text);
END;
$$ LANGUAGE plpgsql;
53.4 Calling Procedures & Functions
CALL update_stock();
SELECT get_total_sales();
53.5 Exception Handling
BEGIN
-- code
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'Error occurred';
END;
53.6 Transaction Control
BEGIN;
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
COMMIT;
53.7 Performance Tips
-- Use set-based operations over loops
53.8 Debugging Stored Code
RAISE NOTICE 'Variable value: %', var;
53.9 Versioning Stored Procedures
-- Keep scripts in version control
53.10 Best Practices
-- Document and test procedures thoroughly
CREATE INDEX idx_name ON users(name);
54.2 Query Plan Analysis
EXPLAIN SELECT * FROM orders WHERE status = 'shipped';
54.3 Optimizing Joins
SELECT * FROM orders o JOIN customers c ON o.customer_id = c.id;
54.4 Avoiding Full Table Scans
-- Add indexes on frequently queried columns
54.5 Caching Query Results
REFRESH MATERIALIZED VIEW monthly_sales;
54.6 Connection Pooling
-- Use PgBouncer or similar tools
54.7 Partitioning Large Tables
-- A partitioned table requires a column list; the bare
-- "CREATE TABLE logs PARTITION BY ..." form is invalid syntax.
CREATE TABLE logs (
id BIGINT,
log_date DATE NOT NULL,
message TEXT
) PARTITION BY RANGE (log_date);
54.8 Statistics & Histograms
ANALYZE orders;
54.9 Parallel Query Execution
-- Enable parallel queries in config
54.10 Monitoring Tools
-- Use pg_stat_statements or pgBadger
CREATE USER app_user WITH PASSWORD 'securepass';
55.2 Role-Based Access Control (RBAC)
CREATE ROLE readonly;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO readonly;
55.3 Encryption at Rest
-- Enable TDE or disk encryption
55.4 Encryption in Transit
-- Configure DB for SSL connections
55.5 SQL Injection Prevention
PREPARE stmt AS SELECT * FROM users WHERE email = $1;
EXECUTE stmt('test@example.com');
55.6 Auditing & Logging
-- Enable audit logs in DBMS
55.7 Regular Security Updates
-- Schedule regular updates
55.8 Password Policies
-- Use password complexity rules
55.9 Least Privilege Principle
REVOKE ALL ON mydb FROM PUBLIC;
55.10 Security Automation
-- Use tools like SQLMap and automated scripts
SELECT * FROM orders
INNER JOIN customers ON orders.customer_id = customers.id;
56.2 LEFT JOIN
SELECT * FROM customers
LEFT JOIN orders ON customers.id = orders.customer_id;
56.3 RIGHT JOIN
SELECT * FROM orders
RIGHT JOIN customers ON orders.customer_id = customers.id;
56.4 FULL OUTER JOIN
SELECT * FROM customers
FULL OUTER JOIN orders ON customers.id = orders.customer_id;
56.5 CROSS JOIN
SELECT * FROM colors
CROSS JOIN sizes;
56.6 SELF JOIN
SELECT a.name AS employee, b.name AS manager
FROM employees a
JOIN employees b ON a.manager_id = b.id;
56.7 USING Clause
SELECT * FROM orders
JOIN customers USING(customer_id);
56.8 NATURAL JOIN
-- CAUTION: NATURAL JOIN matches on EVERY identically named column and
-- silently changes results when a new shared column is added to either
-- table; in production prefer an explicit ON clause or USING(col).
SELECT * FROM orders
NATURAL JOIN customers;
56.9 JOIN with Aggregate
SELECT c.name, COUNT(o.id)
FROM customers c
LEFT JOIN orders o ON c.id = o.customer_id
GROUP BY c.name;
56.10 JOIN Performance Tips
CREATE INDEX idx_customer_id ON orders(customer_id);
CREATE VIEW active_users AS
SELECT * FROM users WHERE status = 'active';
57.2 Creating Views
CREATE VIEW recent_orders AS
SELECT * FROM orders WHERE order_date > CURRENT_DATE - INTERVAL '30 days';
57.3 Updating Views
CREATE OR REPLACE VIEW active_users AS
SELECT * FROM users WHERE active = TRUE;
57.4 Indexed Views
-- Supported in some DBs like SQL Server
CREATE UNIQUE CLUSTERED INDEX idx_view ON active_users (user_id);
57.5 Creating Indexes
CREATE INDEX idx_lastname ON users(last_name);
57.6 Composite Indexes
CREATE INDEX idx_name_dob ON users(last_name, date_of_birth);
57.7 Unique Indexes
CREATE UNIQUE INDEX idx_email ON users(email);
57.8 Partial Indexes
CREATE INDEX idx_active_users ON users(email) WHERE active = TRUE;
57.9 Index Maintenance
REINDEX TABLE users;
57.10 When Not to Use Indexes
-- Evaluate read/write ratio before indexing
-- Regular backups critical for data safety
58.2 Types of Backups
-- Full: all data; Incremental: changes since last backup
58.3 Backup Commands
pg_dump mydb > backup.sql
58.4 Scheduling Backups
0 2 * * * pg_dump mydb > /backups/db_$(date +\%F).sql
58.5 Restoring Data
psql mydb < backup.sql
58.6 Point-in-Time Recovery
-- Use WAL files and restore commands
58.7 Testing Backups
-- Restore backups to test servers periodically
58.8 Backup Encryption
gpg --encrypt --recipient user@example.com backup.sql
58.9 Cloud Backup Solutions
-- AWS S3, Azure Blob storage integration
58.10 Backup Retention Policies
-- Keep backups for required time, then delete
-- Analyze schemas, data volume, downtime
59.2 Exporting Data
mysqldump mydb > export.sql
59.3 Data Transformation
-- Use ETL tools or scripts for transformation
59.4 Importing Data
psql newdb < export.sql
59.5 Schema Migration
-- Use tools like Liquibase or Flyway
59.6 Handling Data Integrity
-- Use checksums and data validation scripts
59.7 Downtime Minimization
-- Use replication or blue-green deployments
59.8 Migration Testing
-- Run queries on migrated data for validation
59.9 Rollback Plans
-- Backup original DB and scripts to revert changes
59.10 Post-Migration Tasks
-- Rebuild indexes, analyze performance
-- Automate repetitive SQL tasks
60.2 SQL Scripting Basics
-- Write multiple SQL statements in a file
SELECT * FROM users;
UPDATE users SET active = TRUE WHERE last_login > NOW() - INTERVAL '30 days';
60.3 Using Shell Scripts
#!/bin/bash
psql mydb -f update_users.sql
60.4 Scheduled Jobs
# Cron example: run every day at 3 AM
0 3 * * * /path/to/script.sh
60.5 Using SQL Agents
-- SQL Server Agent jobs for automation
60.6 Dynamic SQL
-- T-SQL dynamic SQL. NOTE(security): identifiers cannot be bound as
-- parameters, so wrap @tableName in QUOTENAME() to bracket-quote it and
-- block injection through the table name.
EXECUTE('SELECT * FROM ' + QUOTENAME(@tableName));
60.7 Error Handling in Scripts
BEGIN TRY
-- SQL statements
END TRY
BEGIN CATCH
PRINT ERROR_MESSAGE();
END CATCH;
60.8 Logging and Notifications
-- Send email on job completion or failure
60.9 Version Control for Scripts
git add myscript.sql
git commit -m "Added automation script"
60.10 Best Practices
-- Comment your scripts and use consistent naming