-- ── INSERT ──
INSERT INTO users (name, email, age) VALUES ('Alice', 'a@b.com', 30);
-- Omitted columns get their DEFAULT (or NULL if none declared).
INSERT INTO users (name, email) VALUES ('Bob', 'bob@c.com');
INSERT INTO users (name, email) VALUES ('Carol', 'carol@d.com');
-- Insert multiple rows
-- One multi-row VALUES statement is far faster than separate INSERTs.
INSERT INTO users (name, email, age) VALUES
('Dave', 'dave@e.com', 25),
('Eve', 'eve@f.com', 28),
('Frank', 'frank@g.com', 35);
-- INSERT OR ... (conflict handling)
-- OR IGNORE: silently skips the row on a UNIQUE/PK violation.
INSERT OR IGNORE INTO users (email, name) VALUES ('a@b.com', 'Alice2');
-- OR REPLACE: deletes the conflicting row, then inserts a brand-new one.
INSERT OR REPLACE INTO users (email, name) VALUES ('a@b.com', 'Alice2');
-- Targeted upsert: updates only the named columns, keeps the row in place.
INSERT INTO users (email, name) VALUES ('a@b.com', 'Alice2')
ON CONFLICT(email) DO UPDATE SET name = excluded.name;
-- ── SELECT ──
SELECT * FROM users; -- fine for exploration; list explicit columns in production code
SELECT name, email FROM users WHERE age > 25;
SELECT DISTINCT age FROM users ORDER BY age DESC;
-- Pagination
-- NOTE: OFFSET still scans the skipped rows (O(n)); keyset pagination scales better.
SELECT * FROM users LIMIT 10 OFFSET 20;
-- ── UPDATE ──
UPDATE users SET age = 31 WHERE name = 'Alice';
-- Multiple columns in one statement; assumes users has active/updated_at columns.
UPDATE users SET active = 0, updated_at = datetime('now')
WHERE age < 25;
-- ── DELETE ──
DELETE FROM users WHERE id = 3;
DELETE FROM users WHERE active = 0;
-- Intentionally unfiltered: SQLite's "truncate optimization" makes this fast
-- (the optimization applies only when the table has no DELETE triggers).
DELETE FROM users; -- deletes ALL rows (fast in SQLite)
-- ── UPSERT (SQLite 3.24+) ──
-- Requires a UNIQUE constraint or index on the conflict target column (email).
INSERT INTO users (name, email, age) VALUES ('Grace', 'grace@h.com', 22)
ON CONFLICT(email) DO UPDATE SET
name = excluded.name, -- excluded.* = the values the failed INSERT proposed
age = excluded.age;
| Operator | Meaning | Example |
|---|---|---|
| = | Equal | name = 'Alice' |
| != / <> | Not equal | status != 'deleted' |
| > < >= <= | Comparison | age >= 18 |
| BETWEEN | Range inclusive | age BETWEEN 18 AND 65 |
| IN | Set membership | city IN ('NYC','LA') |
| NOT IN | Not in set | status NOT IN ('banned') |
| LIKE | Pattern match | name LIKE 'A%' |
| GLOB | Case-sensitive match | name GLOB 'A*' |
| IS NULL | Null check | email IS NULL |
| IS NOT NULL | Not null | avatar IS NOT NULL |
| Pattern | Matches |
|---|---|
| A% | Starts with A |
| %son | Ends with "son" |
| %oh% | Contains "oh" |
| _ike | Any one char + "ike" (4 chars total, e.g. "Mike", "bike") |
| A___ | "A" + exactly 3 chars |
⚠️UPSERT with ON CONFLICT is the modern way to handle duplicates. Use INSERT OR REPLACE carefully — it deletes the conflicting row and inserts a new one, which can assign a new rowid, break foreign-key references to the old row, and (when PRAGMA recursive_triggers is ON) fire DELETE triggers.
-- ── Setup tables ──
CREATE TABLE orders (
id INTEGER PRIMARY KEY, -- INTEGER PRIMARY KEY is an alias for the rowid
user_id INTEGER NOT NULL REFERENCES users(id), -- FK enforced only with PRAGMA foreign_keys = ON
total REAL NOT NULL, -- NOTE(review): REAL is lossy for money; integer cents is safer
status TEXT DEFAULT 'pending',
created_at TEXT DEFAULT (datetime('now')) -- datetime('now') is UTC
);
CREATE TABLE order_items (
id INTEGER PRIMARY KEY,
order_id INTEGER NOT NULL REFERENCES orders(id),
product_id INTEGER NOT NULL,
qty INTEGER NOT NULL DEFAULT 1,
price REAL NOT NULL -- unit price captured at order time
);
-- ── INNER JOIN ──
-- Only rows with a match in BOTH tables.
SELECT u.name, o.id, o.total, o.status
FROM users u
INNER JOIN orders o ON u.id = o.user_id
WHERE o.status = 'completed';
-- ── LEFT JOIN (all users + their orders) ──
-- COUNT(o.id) counts non-NULL values, so users without orders show 0;
-- COALESCE turns SUM's NULL (no matching rows) into 0.
SELECT u.name, COUNT(o.id) AS order_count,
COALESCE(SUM(o.total), 0) AS total_spent
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
GROUP BY u.id, u.name
ORDER BY total_spent DESC;
-- ── CROSS JOIN ──
-- Cartesian product: every user paired with every product (assumes a products table).
SELECT u.name, p.name AS product
FROM users u CROSS JOIN products p;
-- ── Self Join (hierarchical data) ──
CREATE TABLE categories (
id INTEGER PRIMARY KEY,
name TEXT,
parent_id INTEGER REFERENCES categories(id) -- NULL marks a root category
);
-- LEFT JOIN keeps root categories; their parent column comes back NULL.
SELECT c.name, p.name AS parent
FROM categories c
LEFT JOIN categories p ON c.parent_id = p.id;
-- ── Subquery in WHERE ──
SELECT * FROM users
WHERE id IN (SELECT user_id FROM orders WHERE total > 500);
-- ── Correlated Subquery ──
-- The inner query runs once per user row; for large tables a
-- GROUP BY join (as in the LEFT JOIN example above) scales better.
SELECT u.name,
(SELECT COUNT(*) FROM orders o WHERE o.user_id = u.id) AS order_count
FROM users u;
-- ── CTE (WITH clause) ──
WITH user_totals AS (
-- Per-user spend; HAVING filters groups (SQLite lets it reference the alias).
SELECT user_id, SUM(total) AS total_spent
FROM orders
GROUP BY user_id
HAVING total_spent > 1000
)
SELECT u.name, ut.total_spent
FROM users u
INNER JOIN user_totals ut ON u.id = ut.user_id;
-- ── Recursive CTE (hierarchical) ──
WITH RECURSIVE hierarchy AS (
-- Base case: root categories (no parent).
SELECT id, name, parent_id, 1 AS level
FROM categories WHERE parent_id IS NULL
UNION ALL
-- Recursive step: children of rows already produced, one level deeper.
SELECT c.id, c.name, c.parent_id, h.level + 1
FROM categories c
INNER JOIN hierarchy h ON c.parent_id = h.id
)
SELECT * FROM hierarchy ORDER BY level;
| Type | Behavior |
|---|---|
| INNER JOIN | Only matching rows from both tables |
| LEFT JOIN | All left rows + matching right (NULL if no match) |
| CROSS JOIN | Cartesian product (every combo) |
| USING (col) | Shortcut: ON a.col = b.col |
| NATURAL JOIN | Auto-match columns with same name |
| Function | Description |
|---|---|
| COUNT(*) | Count all rows |
| COUNT(col) | Count non-NULL values |
| SUM(col) | Total of numeric values |
| AVG(col) | Average value |
| MIN(col) | Minimum value |
| MAX(col) | Maximum value |
| GROUP_CONCAT(col) | Concatenate values (comma-separated) |
| TOTAL(col) | Float sum (always returns REAL) |
-- ── AFTER INSERT Trigger (audit log) ──
CREATE TABLE audit_log (
id INTEGER PRIMARY KEY AUTOINCREMENT, -- AUTOINCREMENT guarantees ids are never reused
table_name TEXT NOT NULL,
action TEXT NOT NULL, -- 'INSERT' | 'UPDATE' | 'DELETE'
row_id INTEGER,
old_data TEXT, -- JSON snapshot before the change (NULL for INSERT)
new_data TEXT, -- JSON snapshot after the change (NULL for DELETE)
changed_at TEXT DEFAULT (datetime('now'))
);
CREATE TRIGGER trg_users_after_insert
AFTER INSERT ON users
FOR EACH ROW
BEGIN
-- NEW.* refers to the freshly inserted row.
INSERT INTO audit_log (table_name, action, row_id, new_data)
VALUES ('users', 'INSERT', NEW.id,
json_object('name', NEW.name, 'email', NEW.email));
END;
-- ── AFTER UPDATE Trigger ──
CREATE TRIGGER trg_users_after_update
AFTER UPDATE ON users
FOR EACH ROW
-- WHEN skips the trigger body for no-op updates.
-- NOTE(review): != is NULL-unsafe — a NULL→value change compares UNKNOWN and
-- is skipped; use "OLD.name IS NOT NEW.name" for NULL-safe comparison.
WHEN OLD.name != NEW.name OR OLD.email != NEW.email
BEGIN
INSERT INTO audit_log (table_name, action, row_id, old_data, new_data)
VALUES ('users', 'UPDATE', NEW.id,
json_object('name', OLD.name, 'email', OLD.email),
json_object('name', NEW.name, 'email', NEW.email));
END;
-- ── BEFORE DELETE Trigger ──
CREATE TRIGGER trg_orders_before_delete
BEFORE DELETE ON orders
FOR EACH ROW
BEGIN
-- OLD.* refers to the row about to be deleted.
INSERT INTO audit_log (table_name, action, row_id, old_data)
VALUES ('orders', 'DELETE', OLD.id,
json_object('total', OLD.total, 'status', OLD.status));
END;
-- ── List Triggers ──
SELECT name, tbl_name, sql FROM sqlite_master WHERE type = 'trigger';
-- ── Drop Trigger ──
DROP TRIGGER IF EXISTS trg_users_after_insert;
-- ── VIEW ──
-- A view is a stored SELECT: no data is copied, results always reflect live rows.
CREATE VIEW user_order_summary AS
SELECT
u.id, u.name, u.email,
COUNT(o.id) AS total_orders,
COALESCE(SUM(o.total), 0) AS total_spent,
MAX(o.created_at) AS last_order -- NULL for users with no orders
FROM users u
LEFT JOIN orders o ON u.id = o.user_id
GROUP BY u.id;
-- Use a view like a table
SELECT * FROM user_order_summary WHERE total_spent > 500;
-- ── Materialized View (manual refresh) ──
-- SQLite has no built-in materialized views; emulate one with a snapshot table.
CREATE TABLE mv_user_stats AS
SELECT user_id, COUNT(*) AS order_count, SUM(total) AS total_spent
FROM orders GROUP BY user_id;
-- Refresh: wrap DELETE + re-INSERT in a single transaction so concurrent
-- readers never observe an empty/partial table, and a crash between the
-- two statements cannot lose the snapshot. Explicit column list keeps the
-- INSERT valid if columns are ever added to mv_user_stats.
BEGIN;
DELETE FROM mv_user_stats;
INSERT INTO mv_user_stats (user_id, order_count, total_spent)
SELECT user_id, COUNT(*), SUM(total)
FROM orders GROUP BY user_id;
COMMIT;
-- ── Drop View ──
DROP VIEW IF EXISTS user_order_summary;
💡Use the WHEN clause in triggers to avoid unnecessary trigger execution. Triggers in SQLite run synchronously and can significantly impact INSERT/UPDATE performance on high-write tables.
-- ── Window Functions (SQLite 3.25+) ──
-- Running total per user
SELECT
o.id, o.user_id, o.total,
SUM(o.total) OVER (
PARTITION BY o.user_id -- restart the running sum for each user
ORDER BY o.created_at
ROWS UNBOUNDED PRECEDING -- ROWS (not the default RANGE) avoids lumping tied timestamps
) AS running_total
FROM orders o;
-- Rank orders by total
-- ROW_NUMBER: unique 1,2,3...; RANK: gaps after ties; DENSE_RANK: no gaps.
-- NOTE(review): 'rank' works as an alias in SQLite but is reserved in other dialects.
SELECT
id, user_id, total,
RANK() OVER (ORDER BY total DESC) AS rank,
DENSE_RANK() OVER (ORDER BY total DESC) AS dense_rank,
ROW_NUMBER() OVER (ORDER BY total DESC) AS row_num
FROM orders;
-- Moving average
-- (assumes a daily_sales(date, sales) table)
SELECT
date, sales,
AVG(sales) OVER (
ORDER BY date
ROWS BETWEEN 2 PRECEDING AND CURRENT ROW -- current row + 2 before = 3-row window
) AS moving_avg_3day
FROM daily_sales;
-- LAG / LEAD
SELECT
date, sales,
LAG(sales, 1) OVER (ORDER BY date) AS prev_day, -- NULL on the first row
LEAD(sales, 1) OVER (ORDER BY date) AS next_day, -- NULL on the last row
sales - LAG(sales, 1) OVER (ORDER BY date) AS diff
FROM daily_sales;
-- ── JSON Functions (SQLite 3.38+) ──
CREATE TABLE events (
id INTEGER PRIMARY KEY,
data TEXT NOT NULL -- JSON stored as TEXT
);
INSERT INTO events (data) VALUES (
'{"type":"click","page":"/home","user":42,"ts":"2024-01-15T10:30:00Z"}'
);
-- Extract JSON values ('$' is the document root)
SELECT json_extract(data, '$.type') AS event_type,
json_extract(data, '$.user') AS user_id,
json_extract(data, '$.page') AS page
FROM events;
-- JSON path with conditions
-- NOTE: json_extract in WHERE cannot use an ordinary index;
-- consider an index on the expression for hot queries.
SELECT * FROM events
WHERE json_extract(data, '$.type') = 'click'
AND json_extract(data, '$.user') = 42;
-- JSON array operations (assumes an orders_json table with an $.items array)
SELECT json_array_length(data, '$.items') AS item_count
FROM orders_json;
-- Modify JSON (returns the modified text; does NOT update the stored row)
SELECT json_set(data, '$.status', 'processed')
FROM events WHERE id = 1;
-- JSON aggregate
SELECT json_group_array(name) AS user_names FROM users;
SELECT json_object('count', COUNT(*), 'avg', AVG(age)) FROM users;
-- Pretty print JSON (json_pretty requires SQLite 3.46+)
SELECT json_pretty(data) FROM events LIMIT 1;
| Frame | Rows Included |
|---|---|
| ROWS UNBOUNDED PRECEDING | Current row + all before |
| ROWS 3 PRECEDING | 3 rows before + current |
| ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING | Previous, current, next |
| RANGE UNBOUNDED PRECEDING | Same value group + all before |
| ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING | Current + all after |
| Function | Description |
|---|---|
| ROW_NUMBER() | Sequential number (1,2,3...) |
| RANK() | Rank with gaps for ties |
| DENSE_RANK() | Rank without gaps |
| LAG(col, n) | Value n rows before |
| LEAD(col, n) | Value n rows after |
| FIRST_VALUE(col) | First value in window |
| LAST_VALUE(col) | Last value in window |
| NTH_VALUE(col, n) | Nth value in window |
-- ── Essential PRAGMAs ──
-- Most PRAGMAs are per-connection; journal_mode=WAL persists in the database file.
PRAGMA journal_mode = WAL; -- Write-Ahead Logging
PRAGMA synchronous = NORMAL; -- Fast + safe with WAL
PRAGMA cache_size = -10000; -- negative value = size in KiB, so ~10 MB cache
PRAGMA busy_timeout = 5000; -- 5s lock wait
PRAGMA foreign_keys = ON; -- Enforce FK constraints (OFF by default!)
PRAGMA case_sensitive_like = ON; -- Case-sensitive LIKE
-- ── Schema introspection ──
PRAGMA table_list; -- List all tables (SQLite 3.37+)
PRAGMA table_info(users); -- Column details
PRAGMA foreign_key_list(orders); -- FK references
PRAGMA index_list(users); -- Table indexes
PRAGMA index_info(idx_users_email); -- Index columns
PRAGMA database_list; -- Attached databases
-- ── Date & Time Functions ──
-- 'now' is always UTC; modifiers apply left to right.
SELECT datetime('now'); -- 2024-01-15 10:30:00
SELECT date('now'); -- 2024-01-15
SELECT time('now'); -- 10:30:00
SELECT datetime('now', '+7 days');
SELECT datetime('now', '-1 month');
SELECT datetime('now', '+3 hours', '-30 minutes');
SELECT strftime('%Y-%m-%d %H:%M', 'now'); -- custom formatting
SELECT date('now', 'start of month');
SELECT date('now', 'start of year', '+6 months');
-- ── String Functions ──
SELECT UPPER('hello'), LOWER('WORLD'); -- ASCII-only case folding by default
SELECT TRIM(' spaces '), LTRIM(' left'), RTRIM('right ');
SELECT SUBSTR('abcdef', 1, 3); -- 'abc' (start index is 1-based)
SELECT REPLACE('hello world', 'world', 'SQLite');
SELECT INSTR('hello', 'll'); -- 3 (1-based; 0 when not found)
SELECT LENGTH('hello'); -- 5
SELECT printf('User %s has $%.2f', 'Alice', 42.5);
SELECT LIKE('AL%', 'ALICE'); -- 1 (true); function form is LIKE(pattern, string)
-- ── Math Functions ──
-- CEIL/FLOOR require SQLite 3.35+ built with the math functions enabled.
SELECT ABS(-5), ROUND(3.14159, 2), CEIL(4.1), FLOOR(4.9);
SELECT MIN(1,2,3), MAX(1,2,3); -- multi-argument scalar form, not the aggregates
SELECT RANDOM(); -- Random 64-bit integer
SELECT ZEROBLOB(16); -- 16 bytes of zeros
-- ── Type Checking ──
SELECT typeof(42); -- 'integer'
SELECT typeof(3.14); -- 'real'
SELECT typeof('hello'); -- 'text'
SELECT typeof(X'FF'); -- 'blob'
SELECT typeof(NULL); -- 'null'
⚠️Always enable foreign keys with PRAGMA foreign_keys = ON; at the start of every connection. SQLite does NOT enforce foreign key constraints by default for backwards compatibility.
Q: When should you use SQLite vs PostgreSQL?SQLite is ideal for embedded apps, mobile (iOS/Android), prototyping, testing, and read-heavy workloads with a single writer. PostgreSQL is better for concurrent writes, complex queries, large datasets, client-server architecture, and advanced features like full-text search, extensions, and JSONB.
Q: How does SQLite handle concurrent access?SQLite uses file-level locking. With WAL mode, multiple readers and one writer can operate simultaneously. Readers never block writers and writers never block readers. Without WAL, readers block writers and vice versa. Use BEGIN IMMEDIATE to acquire the write lock early.
Q: What is WAL mode and why is it important?WAL (Write-Ahead Logging) appends changes to a separate log file instead of modifying the database directly. Benefits: concurrent reads during writes, better crash recovery, and faster commits. The WAL file is checkpointed back to the main DB periodically.
Q: Explain SQLite type affinity.SQLite uses dynamic typing with 5 storage classes: INTEGER, REAL, TEXT, BLOB, NULL. Column types are 'affinities' that influence storage. For example, a TEXT column stores integers as TEXT. Use STRICT tables (3.37+) to enforce strict type checking.
Q: How do you optimize SQLite performance?Key optimizations: (1) WAL mode, (2) appropriate indexes on WHERE/JOIN columns, (3) PRAGMA cache_size increase, (4) batch INSERTs inside transactions, (5) prepared statements for repeated queries, (6) ANALYZE after bulk data loads, (7) normalize or denormalize based on query patterns.
Q: What is the difference between DELETE and DROP?DELETE removes rows but keeps the table structure and metadata. DROP removes the table entirely including schema. SQLite has no TRUNCATE; an unfiltered DELETE FROM table serves that role, and its "truncate optimization" makes it fast as long as the table has no DELETE triggers.
Q: How do triggers work in SQLite?Triggers are automatic actions fired on INSERT, UPDATE, or DELETE. Use FOR EACH ROW to execute per-row. The WHEN clause adds a condition. Use NEW.column and OLD.column to reference row data. Triggers are synchronous and can cascade.
Q: What are CTEs and when should you use them?Common Table Expressions (WITH clause) create temporary named result sets. Use them to: (1) improve readability over subqueries, (2) enable recursive queries (hierarchical data), (3) break complex queries into logical steps. Recursive CTEs use UNION ALL with a base case and recursive step.
Q: How does SQLite ensure ACID compliance?Atomicity: transactions wrap changes; rollback undoes all or none. Consistency: constraints (FK, CHECK, UNIQUE) enforce rules. Isolation: locking (DEFERRED/IMMEDIATE/EXCLUSIVE) and WAL provide isolation levels. Durability: journal/WAL persists changes before commit returns.
💡Top SQLite interview topics: WAL mode vs rollback journal, type affinity, concurrency model, PRAGMA settings, window functions, CTEs (including recursive), UPSERT patterns, trigger design, JSON support, and when to choose SQLite over client-server databases.