How to run: Save the following code as calculator.py and run using python calculator.py or python3 calculator.py.
def add(a, b):
    """Return the sum of a and b."""
    return a + b


def subtract(a, b):
    """Return a minus b."""
    return a - b


def multiply(a, b):
    """Return the product of a and b."""
    return a * b


def divide(a, b):
    """Return a / b, or an error-message string when b is zero."""
    if b == 0:
        return "Error! Division by zero."
    return a / b
# Interactive calculator: pick an operation, read two numbers, print the result.
print("Select operation:")
print("1. Add")
print("2. Subtract")
print("3. Multiply")
print("4. Divide")
choice = input("Enter choice (1/2/3/4): ")
try:
    num1 = float(input("Enter first number: "))
    num2 = float(input("Enter second number: "))
except ValueError:
    # Fix: non-numeric input used to crash with an unhandled ValueError.
    print("Invalid input")
else:
    if choice == '1':
        print("Result:", add(num1, num2))
    elif choice == '2':
        print("Result:", subtract(num1, num2))
    elif choice == '3':
        print("Result:", multiply(num1, num2))
    elif choice == '4':
        print("Result:", divide(num1, num2))
    else:
        print("Invalid input")
How to run: Save the following code as tic_tac_toe.py and run using python tic_tac_toe.py or python3 tic_tac_toe.py.
# Shared 3x3 board stored as a flat list of 9 cells: " ", "X", or "O".
board = [" " for _ in range(9)]


def print_board():
    """Print the board as a 3x3 grid with separators."""
    for row in range(3):
        print("|".join(board[3 * row:3 * row + 3]))
        if row < 2:
            print("-+-+-")


def check_winner(player):
    """Return True if `player` occupies any winning line."""
    combos = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
              (0, 3, 6), (1, 4, 7), (2, 5, 8),
              (0, 4, 8), (2, 4, 6)]
    return any(board[a] == board[b] == board[c] == player for a, b, c in combos)


def play():
    """Run a two-player game.

    Fixes over the original: an invalid or taken move no longer consumes a
    turn (the old `for turn in range(9)` advanced regardless, so the game
    could end in a premature "tie"), and non-numeric or out-of-range input
    is rejected instead of crashing with ValueError/IndexError.
    """
    player = "X"
    moves_made = 0
    while moves_made < 9:
        print_board()
        raw = input(f"Player {player}, enter position (0-8): ")
        if not raw.isdigit() or int(raw) > 8:
            print("Please enter a number from 0 to 8.")
            continue
        move = int(raw)
        if board[move] != " ":
            print("Spot taken, try again.")
            continue
        board[move] = player
        moves_made += 1
        if check_winner(player):
            print_board()
            print(f"Player {player} wins!")
            return
        player = "O" if player == "X" else "X"
    print_board()
    print("It's a tie!")


play()
How to run: Save the following code as todo.py and run using python todo.py. It will create a file tasks.txt to store your tasks.
def show_menu():
    """Print the main menu."""
    print("To-Do List App")
    print("1. Show tasks")
    print("2. Add task")
    print("3. Delete task")
    print("4. Exit")


def read_tasks():
    """Return the saved tasks, or [] when tasks.txt does not exist yet."""
    try:
        with open("tasks.txt", "r") as f:
            return f.read().splitlines()
    except FileNotFoundError:
        return []


def write_tasks(tasks):
    """Overwrite tasks.txt with one task per line."""
    with open("tasks.txt", "w") as f:
        for task in tasks:
            f.write(task + "\n")


def main():
    """Menu loop. Fix: a non-numeric delete index no longer crashes."""
    while True:
        show_menu()
        choice = input("Enter your choice: ")
        if choice == "1":
            tasks = read_tasks()
            if tasks:
                print("Tasks:")
                for i, task in enumerate(tasks, 1):
                    print(f"{i}. {task}")
            else:
                print("No tasks found.")
        elif choice == "2":
            task = input("Enter a new task: ")
            tasks = read_tasks()
            tasks.append(task)
            write_tasks(tasks)
            print("Task added.")
        elif choice == "3":
            tasks = read_tasks()
            if tasks:
                for i, task in enumerate(tasks, 1):
                    print(f"{i}. {task}")
                try:
                    index = int(input("Enter task number to delete: "))
                except ValueError:
                    # Fix: int() on non-numeric input used to raise.
                    print("Invalid number.")
                    continue
                if 0 < index <= len(tasks):
                    removed = tasks.pop(index - 1)
                    write_tasks(tasks)
                    print(f"Deleted: {removed}")
                else:
                    print("Invalid number.")
            else:
                print("No tasks to delete.")
        elif choice == "4":
            print("Goodbye!")
            break
        else:
            print("Invalid choice. Try again.")


main()
How to run: Save the following code as contact_book.py and run using python contact_book.py. Contacts are saved in contacts.txt.
def show_menu():
    """Print the main menu."""
    print("\nContact Book")
    print("1. Add Contact")
    print("2. View Contacts")
    print("3. Search Contact")
    print("4. Exit")


def add_contact():
    """Append one "name,phone" record to contacts.txt."""
    name = input("Enter name: ")
    phone = input("Enter phone number: ")
    with open("contacts.txt", "a") as f:
        f.write(f"{name},{phone}\n")
    print("Contact saved.")


def _parse_line(line):
    """Return (name, phone) for a stored record, or None for blank lines.

    Fix: rsplit(",", 1) splits on the LAST comma, so names that themselves
    contain commas no longer crash the unpack the old split(",") caused.
    """
    record = line.strip()
    if not record or "," not in record:
        return None
    name, phone = record.rsplit(",", 1)
    return name, phone


def view_contacts():
    """Print every stored contact."""
    try:
        with open("contacts.txt", "r") as f:
            lines = f.readlines()
        if lines:
            for line in lines:
                parsed = _parse_line(line)
                if parsed:
                    name, phone = parsed
                    print(f"Name: {name}, Phone: {phone}")
        else:
            print("No contacts found.")
    except FileNotFoundError:
        print("No contact file found.")


def search_contact():
    """Case-insensitive substring search over contact names."""
    keyword = input("Enter name to search: ").lower()
    found = False
    try:
        with open("contacts.txt", "r") as f:
            for line in f:
                parsed = _parse_line(line)
                if not parsed:
                    continue
                name, phone = parsed
                if keyword in name.lower():
                    print(f"Found - Name: {name}, Phone: {phone}")
                    found = True
        if not found:
            print("No matching contact found.")
    except FileNotFoundError:
        print("No contact file found.")


def main():
    """Menu loop dispatching to the actions above."""
    while True:
        show_menu()
        choice = input("Enter choice: ")
        if choice == "1":
            add_contact()
        elif choice == "2":
            view_contacts()
        elif choice == "3":
            search_contact()
        elif choice == "4":
            print("Goodbye!")
            break
        else:
            print("Invalid input")


main()
How to run: Save the code below as palindrome.py and run using python palindrome.py. Enter any string to check if it is a palindrome.
def is_palindrome(text):
    """True when `text` reads the same backwards, ignoring case and non-alphanumerics."""
    letters = [ch.lower() for ch in text if ch.isalnum()]
    return letters == letters[::-1]
def main():
    """Interactive loop: check inputs until the user types 'exit'."""
    print("Palindrome Checker")
    while True:
        user_input = input("Enter a word or phrase (or 'exit' to quit): ")
        if user_input.lower() == "exit":
            break
        verdict = "Yes, it's a palindrome!" if is_palindrome(user_input) else "No, it's not a palindrome."
        print(verdict)


main()
How to run: Save the code below as guessing_game.py and run with python guessing_game.py. Try to guess the number randomly chosen by the computer.
import random
def main():
    """Play one round: guess a random 1-100 number with high/low feedback."""
    print("Number Guessing Game")
    secret = random.randint(1, 100)
    tries = 0
    while True:
        answer = input("Guess a number between 1 and 100 (or 'q' to quit): ")
        if answer.lower() == 'q':
            print(f"The number was {secret}")
            break
        if not answer.isdigit():
            print("Please enter a valid number.")
            continue
        value = int(answer)
        tries += 1
        if value < secret:
            print("Too low!")
        elif value > secret:
            print("Too high!")
        else:
            print(f"Correct! You guessed it in {tries} attempts.")
            break


main()
How to run: Save the following code as prime_generator.py and run using python prime_generator.py. Enter a limit to generate all primes up to that number.
def is_prime(n):
    """Return True when n is a prime number (trial division up to sqrt(n))."""
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))


def generate_primes(limit):
    """Return every prime from 2 through `limit` inclusive."""
    return [num for num in range(2, limit + 1) if is_prime(num)]
def main():
    """Prompt for a limit and print all primes up to it."""
    print("Prime Number Generator")
    try:
        upper = int(input("Generate primes up to: "))
    except ValueError:
        print("Please enter a valid number.")
        return
    print("Primes:", generate_primes(upper))


main()
How to run: Save as fibonacci.py and run using python fibonacci.py. Enter how many Fibonacci numbers to display.
def fibonacci(n):
    """Return the first `n` Fibonacci numbers, starting 0, 1."""
    sequence = []
    current, nxt = 0, 1
    while len(sequence) < n:
        sequence.append(current)
        current, nxt = nxt, current + nxt
    return sequence
def main():
    """Prompt for a term count and print that many Fibonacci numbers."""
    print("Fibonacci Sequence Visualizer")
    try:
        count = int(input("Enter number of terms: "))
    except ValueError:
        print("Please enter a valid number.")
        return
    print("Fibonacci sequence:")
    print(fibonacci(count))


main()
How to run: Save as temp_converter.py and run with python temp_converter.py. Choose to convert from Celsius to Fahrenheit or vice versa.
def c_to_f(c):
    """Convert degrees Celsius to degrees Fahrenheit."""
    return c * 9 / 5 + 32


def f_to_c(f):
    """Convert degrees Fahrenheit to degrees Celsius."""
    return (f - 32) * 5 / 9
def main():
    """Menu-driven Celsius/Fahrenheit conversion."""
    print("Temperature Converter")
    print("1. Celsius to Fahrenheit")
    print("2. Fahrenheit to Celsius")
    choice = input("Choose option (1/2): ")
    try:
        temp = float(input("Enter temperature: "))
    except ValueError:
        print("Please enter a numeric value.")
        return
    if choice == "1":
        print(f"{temp}°C is {c_to_f(temp):.2f}°F")
    elif choice == "2":
        print(f"{temp}°F is {f_to_c(temp):.2f}°C")
    else:
        print("Invalid choice.")


main()
How to run: Save as password_generator.py and run with python password_generator.py. Enter desired password length.
import random
import string
def generate_password(length):
    """Return a random password of `length` letters/digits/punctuation.

    Fix: use `secrets` instead of `random` — passwords are security-sensitive
    and the `random` module is not cryptographically secure.
    """
    import secrets  # local import keeps this self-contained script runnable as-is
    characters = string.ascii_letters + string.digits + string.punctuation
    return ''.join(secrets.choice(characters) for _ in range(length))
def main():
    """Prompt for a length and print one generated password."""
    print("Password Generator")
    try:
        length = int(input("Enter password length: "))
    except ValueError:
        print("Please enter a valid number.")
        return
    print("Generated password:", generate_password(length))


main()
How to run: Save the following as anagram_checker.py and run using python anagram_checker.py. Enter two words to check if they are anagrams.
def is_anagram(word1, word2):
    """True when the two words contain exactly the same letters, ignoring case."""
    a, b = word1.lower(), word2.lower()
    return sorted(a) == sorted(b)
def main():
    """Read two words and report whether they are anagrams."""
    print("Anagram Checker")
    w1 = input("Enter first word: ")
    w2 = input("Enter second word: ")
    message = "They are anagrams!" if is_anagram(w1, w2) else "They are not anagrams."
    print(message)


main()
How to run: Save as word_counter.py. Run with python word_counter.py. Enter the path to a text file to count words.
def count_words_in_file(filename):
    """Return the number of whitespace-separated words in the file, or None if it is missing."""
    try:
        with open(filename, 'r') as file:
            return len(file.read().split())
    except FileNotFoundError:
        return None
def main():
    """Prompt for a file path and print its word count."""
    print("Text File Word Counter")
    total = count_words_in_file(input("Enter file path: "))
    if total is None:
        print("File not found.")
    else:
        print(f"Total words in file: {total}")


main()
How to run: Save as log_parser.py. Run with python log_parser.py. Enter log file path to parse and count ERROR lines.
def parse_log_file(filename):
    """Return how many lines contain "ERROR", or None when the file is missing."""
    try:
        with open(filename, 'r') as file:
            return sum(1 for line in file if "ERROR" in line)
    except FileNotFoundError:
        return None
def main():
    """Prompt for a log file and print its ERROR-line count."""
    print("Log File Parser")
    error_count = parse_log_file(input("Enter log file path: "))
    if error_count is None:
        print("File not found.")
    else:
        print(f"Number of ERROR lines: {error_count}")


main()
How to run: Save as file_organizer.py. Run with python file_organizer.py. Enter a directory path to organize files by extension into folders.
import os
import shutil
def organize_files_by_extension(folder):
    """Move every file directly inside `folder` into a subfolder named after its extension."""
    if not os.path.isdir(folder):
        print("Invalid directory path.")
        return
    for entry in os.listdir(folder):
        src = os.path.join(folder, entry)
        if not os.path.isfile(src):
            continue
        # Extension without the leading dot; extension-less files get their own bucket.
        extension = os.path.splitext(entry)[1][1:] or "no_extension"
        target_dir = os.path.join(folder, extension)
        os.makedirs(target_dir, exist_ok=True)
        shutil.move(src, os.path.join(target_dir, entry))


def main():
    """Prompt for a directory and organize it."""
    print("File Organizer - Sort files by extension")
    organize_files_by_extension(input("Enter directory path to organize: "))
    print("Files organized.")


main()
How to run: Save as encrypt_decrypt.py. Run with python encrypt_decrypt.py. Enter text and shift number to encrypt/decrypt using Caesar cipher.
def caesar_cipher(text, shift, decrypt=False):
    """Caesar-shift the letters of `text`; non-letters pass through unchanged.

    With decrypt=True the shift is applied in reverse.
    """
    step = -shift if decrypt else shift
    out = []
    for ch in text:
        if ch.isalpha():
            base = ord('a') if ch.islower() else ord('A')
            out.append(chr(base + (ord(ch) - base + step) % 26))
        else:
            out.append(ch)
    return "".join(out)
def main():
    """Interactive Caesar cipher: choose a mode, text, and shift."""
    print("Text Encryption/Decryption Tool (Caesar Cipher)")
    choice = input("Choose (e)ncrypt or (d)ecrypt: ").lower()
    text = input("Enter text: ")
    try:
        shift = int(input("Enter shift number (e.g., 3): "))
    except ValueError:
        print("Please enter a valid number.")
        return
    if choice == 'e':
        print("Encrypted text:", caesar_cipher(text, shift))
    elif choice == 'd':
        print("Decrypted text:", caesar_cipher(text, shift, decrypt=True))
    else:
        print("Invalid choice.")


main()
How to run: Save as email_extractor.py. Run with python email_extractor.py. Enter path to text file to extract emails.
import re
def extract_emails(filename):
    """Return the set of unique email-like strings found in the file, or None if missing."""
    pattern = re.compile(r'[\w\.-]+@[\w\.-]+\.\w+')
    try:
        with open(filename, 'r') as file:
            return set(pattern.findall(file.read()))
    except FileNotFoundError:
        return None
def main():
    """Prompt for a file and print the unique extracted emails."""
    print("Email Extractor from Text Files")
    emails = extract_emails(input("Enter file path: "))
    if emails is None:
        print("File not found.")
    elif not emails:
        print("No emails found.")
    else:
        print("Emails found:")
        for email in emails:
            print(email)


main()
How to run: Save as news_scraper.py. Install requests and beautifulsoup4 via pip install requests beautifulsoup4. Run with python news_scraper.py.
import requests
from bs4 import BeautifulSoup
def scrape_headlines(url):
    """Fetch `url` and return the text of every <h2> element (example heuristic)."""
    response = requests.get(url)
    if response.status_code != 200:
        print("Failed to retrieve page.")
        return []
    soup = BeautifulSoup(response.text, 'html.parser')
    # example: headlines inside h2 tags
    return [tag.get_text(strip=True) for tag in soup.find_all('h2')]


def main():
    """Prompt for a news URL and print numbered headlines."""
    print("News Headline Scraper")
    url = input("Enter news website URL (e.g., https://www.bbc.com/news): ")
    headlines = scrape_headlines(url)
    if headlines:
        print("Headlines found:")
        for idx, headline in enumerate(headlines, 1):
            print(f"{idx}. {headline}")
    else:
        print("No headlines found.")


main()
How to run: Save as stock_tracker.py. Install yfinance via pip install yfinance. Run with python stock_tracker.py. Enter stock ticker symbol.
import yfinance as yf
def get_stock_price(ticker):
    """Return the latest daily closing price for `ticker`, or None when no data.

    Fix: index positionally with .iloc[0] — `data['Close'][0]` uses an integer
    key against a date-labelled Series, which is deprecated/ambiguous in pandas.
    """
    stock = yf.Ticker(ticker)
    data = stock.history(period="1d")
    if data.empty:
        return None
    return data['Close'].iloc[0]


def main():
    """Prompt for a ticker and print its latest close."""
    print("Stock Price Tracker")
    ticker = input("Enter stock ticker symbol (e.g., AAPL): ").upper()
    price = get_stock_price(ticker)
    if price is None:
        print("Invalid ticker or no data found.")
    else:
        print(f"Latest closing price for {ticker}: ${price:.2f}")


main()
How to run: Save as insta_follower_scraper.py. Install selenium and a webdriver (e.g., ChromeDriver). Run with python insta_follower_scraper.py. Enter Instagram username.
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
def get_followers(username):
    """Scrape the follower count from an Instagram profile page, or None on failure.

    Fix: driver.quit() now runs in a `finally` block, so the browser is closed
    even when driver.get() or the element lookup raises (the original leaked
    the browser if get() failed, and duplicated the quit() call).
    """
    driver = webdriver.Chrome()  # Make sure chromedriver is in PATH
    try:
        driver.get(f"https://www.instagram.com/{username}/")
        time.sleep(5)  # wait for page load
        return driver.find_element(
            By.XPATH, '//a[contains(@href,"/followers")]/span'
        ).get_attribute('title')
    except Exception:
        return None
    finally:
        driver.quit()


def main():
    """Prompt for a username and print its follower count."""
    print("Instagram Follower Scraper")
    username = input("Enter Instagram username: ")
    followers = get_followers(username)
    if followers:
        print(f"{username} has {followers} followers.")
    else:
        print("Failed to retrieve followers.")


main()
How to run: Save as amazon_price_alert.py. Install requests and beautifulsoup4. Run with python amazon_price_alert.py. Enter Amazon product URL and target price.
import requests
from bs4 import BeautifulSoup
def get_amazon_price(url):
    """Return the product price as a float, or None when it cannot be determined."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36"
    }
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        return None
    soup = BeautifulSoup(response.text, 'html.parser')
    price = soup.find(id='priceblock_ourprice') or soup.find(id='priceblock_dealprice')
    if not price:
        return None
    price_text = price.get_text().strip().replace('$', '').replace(',', '')
    try:
        return float(price_text)
    except ValueError:
        # Fix: was a bare `except:` that swallowed every error, not just parse failures.
        return None


def main():
    """Prompt for a product URL and target price, then compare."""
    print("Amazon Price Alert Bot")
    url = input("Enter Amazon product URL: ")
    try:
        target = float(input("Enter your target price: "))
    except ValueError:
        # Fix: non-numeric target used to crash with an unhandled ValueError.
        print("Please enter a valid price.")
        return
    current_price = get_amazon_price(url)
    if current_price is None:
        print("Failed to get price.")
    elif current_price <= target:
        print(f"Price alert! Current price ${current_price} is at or below target ${target}.")
    else:
        print(f"Current price ${current_price} is higher than target ${target}.")


main()
How to run: Save as pdf_metadata.py. Install PyPDF2 with pip install PyPDF2. Run with python pdf_metadata.py. Enter path to a PDF file.
import PyPDF2
def extract_pdf_metadata(filepath):
    """Return the PDF's document-info metadata, or None when the file is missing.

    NOTE(review): PdfFileReader/getDocumentInfo are the legacy PyPDF2 1.x API;
    PyPDF2 3.x renamed them to PdfReader/.metadata — confirm the installed version.
    """
    try:
        with open(filepath, 'rb') as file:
            return PyPDF2.PdfFileReader(file).getDocumentInfo()
    except FileNotFoundError:
        return None


def main():
    """Prompt for a PDF path and print each metadata key/value."""
    print("PDF Metadata Extractor")
    filepath = input("Enter PDF file path: ")
    metadata = extract_pdf_metadata(filepath)
    if metadata is None:
        print("File not found or could not read.")
    else:
        print("Metadata:")
        for key, value in metadata.items():
            print(f"{key}: {value}")


main()
How to run: Save as csv_analyzer.py. Requires a CSV file in the same directory. Run with python csv_analyzer.py.
import csv
def analyze_csv(file_path):
    """Print the header row and the data-row count of a CSV file.

    Fixes: a missing file no longer raises FileNotFoundError, and an empty
    file no longer crashes on `next(reader)` (StopIteration).
    """
    try:
        with open(file_path, newline='') as csvfile:
            reader = csv.reader(csvfile)
            try:
                headers = next(reader)
            except StopIteration:
                print("File is empty.")
                return
            row_count = sum(1 for _ in reader)
    except FileNotFoundError:
        print("File not found.")
        return
    print(f"Headers: {headers}")
    print(f"Total Rows (excluding header): {row_count}")


def main():
    """Prompt for a CSV path and analyze it."""
    print("CSV Analyzer")
    file_path = input("Enter path to CSV file: ")
    analyze_csv(file_path)


main()
How to run: Save as covid_tracker.py. Install pandas with pip install pandas. Requires a COVID CSV file. Run with python covid_tracker.py.
import pandas as pd
def track_covid(file_path, country):
    """Print the most recent COVID row for `country` from an our-world-in-data style CSV."""
    df = pd.read_csv(file_path)
    country_data = df[df['location'] == country]
    if country_data.empty:
        print("Country not found in data.")
        return
    latest = country_data.iloc[-1]  # last row = most recent date
    print(f"Latest COVID Data for {country}:")
    print(f"Date: {latest['date']}")
    print(f"New Cases: {latest['new_cases']}")
    print(f"Total Cases: {latest['total_cases']}")


def main():
    """Prompt for the CSV and country, then report."""
    print("COVID-19 Data Tracker")
    file_path = input("Enter CSV file path: ")
    country = input("Enter country name: ")
    track_covid(file_path, country)


main()
How to run: Save as budget_tracker.py. Install pandas and matplotlib. Run with python budget_tracker.py.
import pandas as pd
import matplotlib.pyplot as plt
def track_budget(file_path):
    """Print per-category totals from the CSV and show them as a pie chart."""
    df = pd.read_csv(file_path)
    totals = df.groupby('Category')['Amount'].sum()  # compute once, reuse for print + plot
    print("Budget Summary:")
    print(totals)
    totals.plot(kind='pie', autopct='%1.1f%%')
    plt.title("Budget Allocation")
    plt.ylabel("")
    plt.show()


def main():
    """Prompt for the budget CSV and visualize it."""
    print("Simple Budget Tracker")
    file_path = input("Enter CSV file path (Category,Amount): ")
    track_budget(file_path)


main()
How to run: Save as marks_visualizer.py. Install pandas and matplotlib. Use a CSV file with columns: Name, Marks. Run with python marks_visualizer.py.
import pandas as pd
import matplotlib.pyplot as plt
def visualize_marks(file_path):
    """Draw a bar chart of Marks per Name from the CSV."""
    df = pd.read_csv(file_path)
    df.plot(kind='bar', x='Name', y='Marks', color='skyblue')
    plt.title("Student Marks")
    plt.xlabel("Student Name")
    plt.ylabel("Marks")
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()


def main():
    """Prompt for the marks CSV and plot it."""
    print("Student Marks Visualizer")
    visualize_marks(input("Enter path to marks CSV file: "))


main()
How to run: Save as movie_ratings.py. Requires matplotlib and a CSV file with Title, Rating. Run with python movie_ratings.py.
import pandas as pd
import matplotlib.pyplot as plt
def plot_movie_ratings(file_path):
    """Bar-chart movies by rating, highest rated first."""
    df = pd.read_csv(file_path).sort_values(by='Rating', ascending=False)
    plt.bar(df['Title'], df['Rating'], color='orange')
    plt.xlabel('Movie Title')
    plt.ylabel('Rating')
    plt.title('Movie Ratings Bar Chart')
    plt.xticks(rotation=60)
    plt.tight_layout()
    plt.show()


def main():
    """Prompt for the ratings CSV and plot it."""
    print("Movie Ratings Visualizer")
    plot_movie_ratings(input("Enter movie ratings CSV path: "))


main()
How to run: Save as weather_app.py. Install requests. Use your own API key from OpenWeatherMap. Run with python weather_app.py.
import requests
def get_weather(city, api_key):
    """Print the current weather for `city` via OpenWeatherMap.

    Fix: use HTTPS so the API key is not transmitted in cleartext.
    """
    url = f"https://api.openweathermap.org/data/2.5/weather?q={city}&appid={api_key}&units=metric"
    response = requests.get(url)
    data = response.json()
    if data.get("main"):
        print(f"Weather in {city}: {data['weather'][0]['description']}")
        print(f"Temperature: {data['main']['temp']}°C")
    else:
        print("City not found or invalid API key.")


def main():
    """Prompt for a city and API key, then report."""
    city = input("Enter city name: ")
    api_key = input("Enter your OpenWeatherMap API key: ")
    get_weather(city, api_key)


main()
How to run: Save as currency_converter.py. Install requests. Run with python currency_converter.py.
import requests
def convert_currency(from_currency, to_currency, amount):
    """Convert `amount` between currencies using exchangerate-api.com latest rates."""
    url = f"https://api.exchangerate-api.com/v4/latest/{from_currency}"
    response = requests.get(url)
    if response.status_code != 200:
        # Fix: an unknown base currency returned an error payload and the
        # original crashed on data["rates"] with a KeyError.
        print("Invalid currency code.")
        return
    rate = response.json()["rates"].get(to_currency)
    if rate:
        converted = amount * rate
        print(f"{amount} {from_currency} = {converted:.2f} {to_currency}")
    else:
        print("Invalid currency code.")


def main():
    """Prompt for currencies and amount, then convert."""
    from_currency = input("From currency (e.g. USD): ").upper()
    to_currency = input("To currency (e.g. EUR): ").upper()
    try:
        amount = float(input("Amount: "))
    except ValueError:
        # Fix: a non-numeric amount used to crash with an unhandled ValueError.
        print("Please enter a numeric amount.")
        return
    convert_currency(from_currency, to_currency, amount)


main()
How to run: Save as github_info.py. Install requests. Run with python github_info.py.
import requests
def get_repo_info(owner, repo):
    """Print basic stats for a GitHub repository via the REST API."""
    url = f"https://api.github.com/repos/{owner}/{repo}"
    response = requests.get(url)
    if response.status_code != 200:
        print("Repository not found.")
        return
    data = response.json()
    print(f"Repository: {data['full_name']}")
    print(f"Stars: {data['stargazers_count']}")
    print(f"Forks: {data['forks_count']}")
    print(f"Language: {data['language']}")
    print(f"Description: {data['description']}")


def main():
    """Prompt for owner/repo and report."""
    owner = input("Enter GitHub username: ")
    repo = input("Enter repository name: ")
    get_repo_info(owner, repo)


main()
How to run: Save as translator.py. Install googletrans==4.0.0rc1. Run with python translator.py.
from googletrans import Translator
def translate_text(text, dest_lang):
    """Translate `text` to `dest_lang` with googletrans and print both versions."""
    translated = Translator().translate(text, dest=dest_lang)
    print(f"Original: {text}")
    print(f"Translated ({dest_lang}): {translated.text}")


def main():
    """Prompt for text and a target language code, then translate."""
    text = input("Enter text to translate: ")
    dest_lang = input("Enter destination language (e.g. fr, es, de): ")
    translate_text(text, dest_lang)


main()
How to run: Save as quote_fetcher.py. Install requests. Run with python quote_fetcher.py.
import requests
def fetch_quote():
    """Fetch one random quote from quotable.io and print it with its author."""
    data = requests.get("https://api.quotable.io/random").json()
    print(f"Quote: {data['content']}")
    print(f"- {data['author']}")


def main():
    """Announce and fetch a quote."""
    print("Fetching Quote of the Day...")
    fetch_quote()


main()
How to run: Save as login_system.py. Install bcrypt using pip install bcrypt. Run with python login_system.py.
import bcrypt
# In-memory store: username -> bcrypt hash (lost when the process exits).
users = {}


def register():
    """Create a user with a bcrypt-hashed password."""
    username = input("Enter new username: ")
    password = input("Enter new password: ").encode('utf-8')
    users[username] = bcrypt.hashpw(password, bcrypt.gensalt())
    print("User registered successfully!")


def login():
    """Check entered credentials against the in-memory store."""
    username = input("Username: ")
    password = input("Password: ").encode('utf-8')
    stored = users.get(username)
    if stored is not None and bcrypt.checkpw(password, stored):
        print("Login successful!")
    else:
        print("Invalid credentials.")


def main():
    """Menu loop; any choice other than 1/2 exits."""
    while True:
        choice = input("1. Register 2. Login 3. Exit: ")
        if choice == "1":
            register()
        elif choice == "2":
            login()
        else:
            break


main()
How to run: Save as otp_verifier.py. Run with python otp_verifier.py.
import random
def generate_otp():
    """Return a random 6-digit OTP in the range 100000-999999."""
    return random.randint(100000, 999999)
def verify_otp(sent_otp):
    """Prompt for the OTP and report whether it matches `sent_otp`.

    Fix: compare as strings so non-numeric input prints "Invalid OTP."
    instead of crashing in int() with a ValueError.
    """
    entered = input("Enter the OTP: ").strip()
    if entered == str(sent_otp):
        print("OTP Verified!")
    else:
        print("Invalid OTP.")


def main():
    """Generate, 'send', and verify one OTP."""
    otp = generate_otp()
    print(f"Your OTP is: {otp}")  # Simulate sending via SMS/email
    verify_otp(otp)


main()
How to run: Save as captcha_generator.py. Run with python captcha_generator.py.
import random
import string
def generate_captcha(length=6):
    """Return a random alphanumeric CAPTCHA string of `length` characters."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))
def main():
    """Show a CAPTCHA and check the user's (case-sensitive) answer."""
    captcha = generate_captcha()
    print(f"CAPTCHA: {captcha}")
    user_input = input("Enter CAPTCHA: ")
    verdict = "CAPTCHA correct!" if user_input == captcha else "CAPTCHA incorrect!"
    print(verdict)


main()
How to run: Save as password_strength.py. Run with python password_strength.py.
import re
def check_strength(password):
    """Print whether the password satisfies all strength criteria."""
    criteria = [
        len(password) >= 8,                               # minimum length
        re.search(r"\d", password),                       # at least one digit
        re.search(r"[A-Z]", password),                    # at least one uppercase
        re.search(r"[a-z]", password),                    # at least one lowercase
        re.search(r"[!@#$%^&*(),.?\":{}|<>]", password),  # at least one symbol
    ]
    if all(criteria):
        print("Strong password!")
    else:
        print("Weak password!")


def main():
    """Prompt for a password and grade it."""
    check_strength(input("Enter your password: "))


main()
How to run: Save as file_encryptor.py. Install cryptography using pip install cryptography. Run with python file_encryptor.py.
from cryptography.fernet import Fernet
def generate_key():
    """Create a new Fernet key and save it to key.key."""
    with open("key.key", "wb") as key_file:
        key_file.write(Fernet.generate_key())


def load_key():
    """Read the saved key from key.key.

    Fix: use a context manager — the original `return open(...).read()`
    never closed the file handle (resource leak).
    """
    with open("key.key", "rb") as key_file:
        return key_file.read()


def encrypt_file(filename):
    """Encrypt `filename` in place using the saved key."""
    f = Fernet(load_key())
    with open(filename, "rb") as file:
        data = file.read()
    with open(filename, "wb") as file:
        file.write(f.encrypt(data))


def decrypt_file(filename):
    """Decrypt `filename` in place using the saved key."""
    f = Fernet(load_key())
    with open(filename, "rb") as file:
        data = file.read()
    with open(filename, "wb") as file:
        file.write(f.decrypt(data))
# Uncomment the line you want to run:
# generate_key()
# encrypt_file("example.txt")
# decrypt_file("example.txt")
How to run: Save as app.py. Run with python app.py. Install Flask: pip install flask.
from flask import Flask, render_template_string, request
import sqlite3
app = Flask(__name__)

# Single shared connection; check_same_thread=False lets Flask's worker
# threads reuse it (no locking — acceptable for a demo, not production).
conn = sqlite3.connect('blog.db', check_same_thread=False)
conn.execute('CREATE TABLE IF NOT EXISTS posts (title TEXT, content TEXT)')


@app.route('/')
def index():
    """List all posts."""
    posts = conn.execute('SELECT title, content FROM posts').fetchall()
    return render_template_string('''
<h2>My Blog</h2>
<a href="/new">New Post</a>
{% for title, content in posts %}
<h3>{{ title }}</h3><p>{{ content }}</p>
{% endfor %}
''', posts=posts)


@app.route('/new', methods=['GET', 'POST'])
def new_post():
    """Show the new-post form; insert the post on POST."""
    if request.method == 'POST':
        conn.execute('INSERT INTO posts VALUES (?, ?)', (request.form['title'], request.form['content']))
        conn.commit()
        return "Post added. <a href='/'>Back</a>"
    return '''
<form method="post">
Title: <input name="title"><br>
Content: <textarea name="content"></textarea><br>
<input type="submit">
</form>
'''


app.run(debug=True)
How to run: Save server and client as server.py and client.py. Run server, then multiple clients.
# server.py
import socket
import threading

server = socket.socket()
server.bind(("localhost", 12345))
server.listen(5)
clients = []


def broadcast(msg):
    """Send msg to every connected client."""
    for c in clients:
        c.send(msg)


def handle_client(conn):
    """Receive messages from one client and broadcast them.

    Fix: the original handled the newest client in a nested `while True`
    inside the accept loop, so accept() was never called again and only
    one client could ever chat. Each client now gets its own thread.
    """
    while True:
        msg = conn.recv(1024)
        if not msg:  # empty recv => client disconnected
            clients.remove(conn)
            conn.close()
            break
        broadcast(msg)


print("Server started...")
while True:
    conn, addr = server.accept()
    clients.append(conn)
    conn.send(b"Connected to chat.")
    threading.Thread(target=handle_client, args=(conn,), daemon=True).start()
# client.py
import socket
import threading

client = socket.socket()
client.connect(("localhost", 12345))


def listen():
    """Continuously print messages received from the server."""
    while True:
        print(client.recv(1024).decode())


threading.Thread(target=listen).start()

# Main thread: forward user input to the server.
while True:
    client.send(input().encode())
How to run: Save as face_detect.py. Install OpenCV with pip install opencv-python. Run with python face_detect.py.
import cv2
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture(0)
while True:
    ret, frame = cam.read()
    if not ret:
        # Fix: cam.read() can fail (no camera / stream ended); without this
        # check cvtColor(None) raises. Exit the loop cleanly instead.
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('Face Detector', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
How to run: Save as voice_assistant.py. Install speechrecognition pyaudio with pip. Run with mic connected.
import speech_recognition as sr

r = sr.Recognizer()
with sr.Microphone() as source:
    print("Say something...")
    audio = r.listen(source)
try:
    text = r.recognize_google(audio)
    print("You said:", text)
except (sr.UnknownValueError, sr.RequestError):
    # Fix: was a bare `except:` that also swallowed KeyboardInterrupt and
    # hid real bugs; catch only the recognizer's failure modes.
    print("Could not recognize.")
How to run: Save as email_sender.py. Uses Gmail SMTP. Note: Google has discontinued "Allow Less Secure Apps"; enable 2-Step Verification and generate an App Password, then use that App Password as the password below.
import smtplib
from email.mime.text import MIMEText

# WARNING: credentials hard-coded for demo purposes only.
sender = "your_email@gmail.com"
receiver = "receiver@example.com"
password = "your_password"

msg = MIMEText("Hello, this is a test email.")
msg['Subject'] = "Test Email"
msg['From'] = sender
msg['To'] = receiver

# Context manager closes the SMTP session (quit) on exit.
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as server:
    server.login(sender, password)
    server.sendmail(sender, receiver, msg.as_string())
print("Email sent!")
How to run: Save as iris_classifier.py. Install required modules with pip install scikit-learn (the PyPI package name "sklearn" is deprecated).
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

# Load the bundled iris dataset and split 70/30 into train/test.
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.3)

# Fit a decision tree and report accuracy on the held-out split.
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
print("Model accuracy:", model.score(X_test, y_test))
How to run: Save as house_price.py. Requires CSV file. Install modules with pip install pandas scikit-learn.
import pandas as pd
from sklearn.linear_model import LinearRegression

data = pd.read_csv('house_data.csv')  # columns: size, price
X = data[['size']]
y = data['price']
model = LinearRegression()
model.fit(X, y)
# Fix: predict with a DataFrame carrying the same feature name the model was
# fitted with — passing a bare [[2000]] list triggers sklearn's
# "X does not have valid feature names" warning and is fragile.
query = pd.DataFrame({'size': [2000]})
price = model.predict(query)
print("Predicted price for 2000 sq.ft:", price[0])
How to run: Save as digit_recognizer.py. Install modules with pip install scikit-learn (the PyPI package name "sklearn" is deprecated).
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier

# Load the 8x8 handwritten-digit images and split 70/30 into train/test.
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.3)

# Fit a random forest and report accuracy on the held-out split.
model = RandomForestClassifier()
model.fit(X_train, y_train)
print("Accuracy:", model.score(X_test, y_test))
How to run: Save as spam_detector.py. Install modules: pip install pandas scikit-learn.
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split

# Load the SMS spam dataset; keep only the label (v1) and text (v2) columns.
data = pd.read_csv('spam.csv', encoding='latin-1')[['v1', 'v2']]
data.columns = ['label', 'text']

X_train, X_test, y_train, y_test = train_test_split(data['text'], data['label'])

# Bag-of-words vocabulary fitted on the training split only.
cv = CountVectorizer()
X_train_vec = cv.fit_transform(X_train)
X_test_vec = cv.transform(X_test)

model = MultinomialNB()
model.fit(X_train_vec, y_train)
print("Accuracy:", model.score(X_test_vec, y_test))
How to run: Save as movie_recommender.py. Requires CSV. Install pandas scikit-learn.
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

movies = pd.read_csv("movies.csv")  # columns: title, description
tfidf = TfidfVectorizer(stop_words='english')
tfidf_matrix = tfidf.fit_transform(movies['description'])
cos_sim = cosine_similarity(tfidf_matrix, tfidf_matrix)


def recommend(title):
    """Print the 5 movies whose descriptions are most similar to `title`.

    Fix: an unknown title used to raise IndexError on `.index[0]`; it now
    prints a message instead.
    """
    matches = movies.index[movies['title'] == title]
    if len(matches) == 0:
        print(f"Movie not found: {title}")
        return
    idx = matches[0]
    sim_scores = sorted(enumerate(cos_sim[idx]), key=lambda x: x[1], reverse=True)
    # Skip position 0: that is the movie itself (similarity 1.0).
    for pos, _score in sim_scores[1:6]:
        print(movies.iloc[pos]['title'])


recommend("The Matrix")
How to run: Save as churn_prediction.py. Download dataset from Kaggle ("Telco Customer Churn"). Install dependencies with pip install pandas scikit-learn.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report

# Load dataset
data = pd.read_csv('Telco-Customer-Churn.csv')

# Encode every object-dtype (categorical) column as integer codes.
label_enc = LabelEncoder()
for column in data.columns:
    if data[column].dtype == 'object':
        data[column] = label_enc.fit_transform(data[column])

# Features / label, then an 80-20 train/test split.
X = data.drop('Churn', axis=1)
y = data['Churn']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Fit a random forest and report per-class metrics on the held-out set.
model = RandomForestClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print(classification_report(y_test, predictions))
How to run: Save as loan_approval.py. Download dataset from Analytics Vidhya (Loan Prediction Dataset). Install dependencies using pip install pandas scikit-learn.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report

# Load dataset
df = pd.read_csv('loan_data.csv')

# Forward-fill missing values. Fix: fillna(method='ffill') is deprecated in
# modern pandas; DataFrame.ffill() is the supported spelling.
df = df.ffill()

# Encode categorical (object-dtype) columns as integer codes.
label_enc = LabelEncoder()
for col in df.select_dtypes(include='object').columns:
    df[col] = label_enc.fit_transform(df[col])

# Features and target
X = df.drop('Loan_Status', axis=1)
y = df['Loan_Status']

# 80-20 train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Train model
model = RandomForestClassifier()
model.fit(X_train, y_train)

# Predict and evaluate
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
How to run: Save the code as fraud_detection.py. Download the dataset from Kaggle (Credit Card Fraud). Install the required libraries using pip install pandas scikit-learn.
# Credit-card fraud detection via Isolation Forest anomaly detection.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from sklearn.metrics import classification_report

# Load the Kaggle credit-card dataset; 'Class' is 1 for fraud, 0 for normal.
df = pd.read_csv('creditcard.csv')

X = df.drop('Class', axis=1)
y = df['Class']

# random_state makes both the split and the forest reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Isolation Forest is unsupervised: it is fitted on features only and flags
# roughly the `contamination` fraction of points as outliers.
model = IsolationForest(contamination=0.001, random_state=42)
model.fit(X_train)

# predict() returns -1 for anomalies (fraud) and 1 for inliers; remap to the
# dataset's 1=fraud / 0=normal convention before scoring.
y_pred = model.predict(X_test)
y_pred = [1 if x == -1 else 0 for x in y_pred]
print(classification_report(y_test, y_pred))
How to run: Save as cats_dogs_classifier.py. Download the dataset from Kaggle ("Dogs vs Cats"). Install required libraries using pip install tensorflow.
"""Binary cats-vs-dogs CNN classifier trained from a directory of images."""
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers, models

# Target image dimensions.
IMG_W, IMG_H = 150, 150

# One generator rescales pixels to [0, 1] and carves out a 20% validation split.
datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)

train_gen = datagen.flow_from_directory(
    'dogs_vs_cats',
    target_size=(IMG_W, IMG_H),
    batch_size=32,
    class_mode='binary',
    subset='training',
)
val_gen = datagen.flow_from_directory(
    'dogs_vs_cats',
    target_size=(IMG_W, IMG_H),
    batch_size=32,
    class_mode='binary',
    subset='validation',
)

# Two conv/pool stages feeding a dense head with a sigmoid output.
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_W, IMG_H, 3)),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_gen, epochs=5, validation_data=val_gen)
model.save('cats_dogs_model.h5')
How to run: Save the code as fake_news_detector.py. Download the dataset from Kaggle ("Fake News Dataset"). Install dependencies using pip install pandas scikit-learn.
"""Fake-news detection: TF-IDF features + logistic regression."""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report

# Load and drop rows with missing text/labels.
news = pd.read_csv('fake_news.csv').dropna()
texts = news['text']
labels = news['label']

# Vectorize articles; very common terms (document frequency > 0.7) are dropped.
vectorizer = TfidfVectorizer(stop_words='english', max_df=0.7)
features = vectorizer.fit_transform(texts)

# Hold out 20% for evaluation.
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)

clf = LogisticRegression()
clf.fit(X_train, y_train)
print(classification_report(y_test, clf.predict(X_test)))
How to run: Save as emotion_detection.py. Download the Emotion Dataset. Install dependencies with pip install pandas tensorflow scikit-learn.
"""Text emotion classifier: tokenized sequences fed to a stacked LSTM."""
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

VOCAB_SIZE = 5000
MAX_LEN = 100

# Dataset columns: 'text' and 'emotion'.
data = pd.read_csv('emotion_dataset.csv')
texts = data['text'].values
labels = data['emotion'].values

# Map emotion strings to integer class ids.
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(labels)

# Tokenize and pad every text to a fixed length.
tokenizer = Tokenizer(num_words=VOCAB_SIZE, oov_token='')
tokenizer.fit_on_texts(texts)
padded = pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=MAX_LEN, padding='post')

X_train, X_test, y_train, y_test = train_test_split(padded, encoded_labels, test_size=0.2)

# Embedding -> two LSTMs -> dense head sized to the number of emotion classes.
model = Sequential([
    Embedding(VOCAB_SIZE, 64, input_length=MAX_LEN),
    LSTM(64, return_sequences=True),
    Dropout(0.5),
    LSTM(32),
    Dense(32, activation='relu'),
    Dense(len(label_encoder.classes_), activation='softmax'),
])
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=5, validation_data=(X_test, y_test))
model.save('emotion_detection_model.h5')
How to run: Save as time_series_forecast.py. Install dependencies with pip install yfinance pandas matplotlib statsmodels tensorflow scikit-learn.
import yfinance as yf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima.model import ARIMA
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
# Download stock data (e.g. Apple); requires network access to Yahoo Finance.
data = yf.download('AAPL', start='2020-01-01', end='2023-01-01')
close_prices = data['Close']
# Plot the raw closing-price series.
close_prices.plot(title='AAPL Closing Prices')
plt.show()
# Seasonal decomposition; period=252 approximates one trading year.
result = seasonal_decompose(close_prices, model='additive', period=252)
result.plot()
plt.show()
# ARIMA model example (order p,d,q): 5 AR lags, first differencing, no MA terms.
arima_model = ARIMA(close_prices, order=(5,1,0))
arima_result = arima_model.fit()
print(arima_result.summary())
# Prepare data for LSTM: scale prices into [0, 1] as an (n, 1) column vector.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(close_prices.values.reshape(-1,1))
# Create supervised (window, next-value) training pairs from the scaled series.
def create_dataset(dataset, time_step=60):
    """Slide a `time_step`-wide window over an (n, 1) array.

    Returns X of shape (samples, time_step) and y of shape (samples,),
    where y[i] is the value immediately following window X[i].
    """
    X, y = [], []
    for i in range(len(dataset) - time_step - 1):
        X.append(dataset[i:(i + time_step), 0])
        y.append(dataset[i + time_step, 0])
    return np.array(X), np.array(y)
# Window the scaled series and add the trailing feature axis Keras expects:
# (samples, time_step, 1).
time_step = 60
X, y = create_dataset(scaled_data, time_step)
X = X.reshape(X.shape[0], X.shape[1], 1)

# Two stacked LSTM layers regressing the next scaled price.
model = Sequential([
    LSTM(50, return_sequences=True, input_shape=(time_step, 1)),
    LSTM(50),
    Dense(1),
])
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X, y, epochs=5, batch_size=64)

# Persist the trained model.
model.save('time_series_lstm_model.h5')
How to run: Save as house_rent_prediction.py. Download dataset from Kaggle ("House Rent Dataset"). Install dependencies with pip install pandas scikit-learn matplotlib.
"""House rent prediction with linear regression on standardized features."""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt

# Load and drop incomplete rows.
rent_df = pd.read_csv('house_rent.csv').dropna()

# Example feature set; 'location' is categorical and gets one-hot encoded
# (drop_first avoids the dummy-variable trap).
X = pd.get_dummies(rent_df[['size', 'location', 'bedrooms']],
                   columns=['location'], drop_first=True)
y = rent_df['rent']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Standardize features; the scaler is fitted on training data only.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

regressor = LinearRegression()
regressor.fit(X_train_scaled, y_train)

y_pred = regressor.predict(X_test_scaled)
print(f'Mean Squared Error: {mean_squared_error(y_test, y_pred)}')
print(f'R2 Score: {r2_score(y_test, y_pred)}')

# Actual vs predicted scatter - a perfect model lies on the diagonal.
plt.scatter(y_test, y_pred)
plt.xlabel('Actual Rent')
plt.ylabel('Predicted Rent')
plt.title('House Rent Prediction')
plt.show()
How to run: Save as wine_quality_prediction.py. Download Wine Quality Dataset from UCI. Install dependencies with pip install pandas scikit-learn matplotlib seaborn.
"""Red-wine quality: quick EDA + binary good/bad Random Forest classifier."""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import StandardScaler

# The UCI red-wine CSV is semicolon-delimited.
wine = pd.read_csv('winequality-red.csv', sep=';')

# Exploratory Data Analysis: summary stats and the quality-score distribution.
print(wine.describe())
sns.countplot(x='quality', data=wine)
plt.show()

# Binarize the target: quality >= 7 counts as "good" (1), everything else 0.
X = wine.drop('quality', axis=1)
y = wine['quality'].apply(lambda q: 1 if q >= 7 else 0)

# Standardize all physico-chemical features.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.2, random_state=42)

clf = RandomForestClassifier(n_estimators=100, random_state=42)
clf.fit(X_train, y_train)

y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

# Which physico-chemical features drive the prediction?
sns.barplot(x=clf.feature_importances_, y=X.columns)
plt.title('Feature Importance')
plt.show()
How to run: Save as sentiment_analysis.py. Install dependencies with pip install pandas scikit-learn nltk tensorflow.
"""Social-media sentiment classifier: tokenized text -> LSTM -> softmax."""
import pandas as pd
import nltk
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout
from tensorflow.keras.utils import to_categorical

nltk.download('punkt')

# Expected CSV columns: 'text', 'sentiment'.
posts = pd.read_csv('social_media_posts.csv')

# Integer-encode the sentiment labels.
encoder = LabelEncoder()
posts['sentiment_encoded'] = encoder.fit_transform(posts['sentiment'])

# Tokenize and pad posts to 100 tokens.
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(posts['text'])
X = pad_sequences(tokenizer.texts_to_sequences(posts['text']), maxlen=100)

# One-hot targets to pair with categorical_crossentropy.
y = to_categorical(posts['sentiment_encoded'])

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

model = Sequential([
    Embedding(input_dim=5000, output_dim=64, input_length=100),
    LSTM(64),
    Dropout(0.5),
    Dense(y.shape[1], activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=5, batch_size=64, validation_data=(X_test, y_test))
How to run: Save as chatbot.py. Install dependencies with pip install rasa for rule-based or HuggingFace transformers for seq2seq.
# Simple rule-based chatbot: keyword -> canned response lookup.
responses = {
    'hi': 'Hello! How can I help you?',
    'bye': 'Goodbye! Have a nice day.',
    'hours': 'Our working hours are 9 AM to 5 PM, Monday to Friday.'
}

def chatbot_response(message):
    """Return the canned reply for the first keyword found in `message`.

    Matching is case-insensitive substring search in dict-insertion order;
    unknown messages get a fallback reply.
    """
    message = message.lower()
    for key in responses:
        if key in message:
            return responses[key]
    return "Sorry, I don't understand."
# Interactive REPL: type 'exit' to quit.
while True:
    user_input = input("You: ")
    if user_input.lower() == 'exit':
        print("Chatbot: Goodbye!")
        break
    print("Chatbot:", chatbot_response(user_input))
How to run: Save as ocr.py. Install dependencies with pip install pytesseract pillow. Also install Tesseract OCR engine on your system.
"""Extract text from an image file with Tesseract OCR."""
import pytesseract
from PIL import Image

# Point pytesseract at the Tesseract binary (Linux default path; adjust per OS).
pytesseract.pytesseract.tesseract_cmd = r'/usr/bin/tesseract'

# Run OCR on the sample image and print the recognized text.
image = Image.open('sample_text_image.png')
extracted = pytesseract.image_to_string(image)
print('Extracted Text:')
print(extracted)
How to run: Save as image_caption.py. Requires TensorFlow and pretrained CNN + RNN models. Install pip install tensorflow pillow numpy.
# Due to complexity, this is a simplified placeholder.
# A real captioner pairs a CNN image encoder with an RNN/transformer decoder.
print("Image Caption Generator requires complex models.")
print("Use pretrained encoder-decoder architectures from TensorFlow or PyTorch.")
print("You can load models and run inference on images to generate captions.")
How to run: Save as speech_to_text.py. Install dependencies with pip install SpeechRecognition pyaudio. For pyaudio, use system-specific installation.
# Capture one utterance from the default microphone and transcribe it with
# Google's free web speech API.
import speech_recognition as sr

r = sr.Recognizer()
with sr.Microphone() as source:
    print("Please say something...")
    audio = r.listen(source)
try:
    text = r.recognize_google(audio)
    print("You said:", text)
except sr.UnknownValueError:
    # Speech was unintelligible.
    print("Sorry, could not understand audio.")
except sr.RequestError as e:
    # API unreachable or quota exceeded.
    print("Could not request results; {0}".format(e))
How to run: Save as text_summarization.py. Install pip install transformers.
"""Abstractive summarization with the default Hugging Face pipeline."""
from transformers import pipeline

# Downloads a default summarization model on first run.
summarizer = pipeline("summarization")

text = """
Artificial intelligence is transforming many industries.
It enables automation, enhances decision making, and unlocks new insights from data.
However, challenges remain in ensuring ethical use and managing bias.
"""

# Deterministic (no sampling) summary bounded to 25-50 tokens.
summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
print("Summary:", summary[0]['summary_text'])
How to run: Save as recommendation_system.py. Install pip install scikit-learn pandas.
"""Tiny user-based recommender: cosine nearest neighbours on a rating matrix."""
import pandas as pd
from sklearn.neighbors import NearestNeighbors

# Rows = users, columns = items, values = ratings (0 = unrated).
ratings = pd.DataFrame(
    {
        'item1': [5, 3, 0, 1],
        'item2': [4, 0, 0, 1],
        'item3': [1, 1, 0, 5],
        'item4': [0, 0, 5, 4],
        'item5': [0, 1, 5, 4],
    },
    index=['User1', 'User2', 'User3', 'User4'],
)

# Brute-force cosine similarity is fine at this scale.
knn = NearestNeighbors(metric='cosine', algorithm='brute')
knn.fit(ratings)

# The 2 most similar users to User1 (User1 itself is always the nearest).
distances, indices = knn.kneighbors(ratings.loc[['User1']], n_neighbors=2)
print("Nearest neighbors for User1:", indices)
How to run: Save as hand_gesture_recognition.py. Install pip install opencv-python mediapipe.
# Real-time hand landmark tracking from the default webcam using MediaPipe.
import cv2
import mediapipe as mp

mp_hands = mp.solutions.hands
hands = mp_hands.Hands()
mp_draw = mp.solutions.drawing_utils

cap = cv2.VideoCapture(0)
while True:
    success, img = cap.read()
    if not success:  # camera unplugged / stream ended; avoids cvtColor(None)
        break
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
    results = hands.process(img_rgb)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_draw.draw_landmarks(img, hand_landmarks, mp_hands.HAND_CONNECTIONS)
    cv2.imshow("Hand Gesture Recognition", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break
cap.release()
cv2.destroyAllWindows()
How to run: Save as face_mask_detector.py. Install pip install opencv-python tensorflow. Use a pretrained model.
# Placeholder webcam loop for a face-mask detector; model inference goes
# where marked below.
import cv2

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:  # camera closed or frame grab failed
        break
    # Add mask detection code here (model inference on `frame`).
    cv2.putText(frame, "Mask Detector Placeholder", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("Face Mask Detector", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break
cap.release()
cv2.destroyAllWindows()
How to run: Save as object_detection.py. Install pip install opencv-python and download pretrained YOLO/SSD models.
# YOLOv3 webcam object detection skeleton (post-processing left as TODO).
import cv2

# Load the YOLO network; weights/cfg must be downloaded separately.
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
layer_names = net.getLayerNames()
# getUnconnectedOutLayers() returns 1-based layer indices.
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:  # camera closed or frame grab failed
        break
    height, width, channels = frame.shape
    # 1/255 pixel scaling, 416x416 input, BGR->RGB swap - standard YOLO prep.
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
    # Post-processing to detect objects:
    # parse `outs` for confidences/boxes, apply NMS, draw rectangles.
    cv2.imshow("Object Detection", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break
cap.release()
cv2.destroyAllWindows()
How to run: Save as style_transfer.py. Install pip install torch torchvision pillow.
import torch
import torchvision.transforms as transforms
from PIL import Image
from torchvision.models import vgg19
# Load images, preprocess, and apply style transfer model.
# (Stub: use a pre-trained VGG19 feature extractor with the Gatys et al.
# optimization loop, or follow the official PyTorch neural-style tutorial.)
How to run: Save as resume_screening.py. Install spaCy with pip install spacy and download the model with python -m spacy download en_core_web_sm.
# Toy resume screener: naive bag-of-words containment check of the job
# description's words inside the resume text.
import spacy

nlp = spacy.load("en_core_web_sm")  # loaded for future NLP use (unused below)
job_description = "Software engineer with Python and ML experience"
resume_text = "Experienced Python developer with machine learning skills"
# Simple keyword matching example: every job-description word must appear
# verbatim in the resume (case-insensitive substring match).
if all(word in resume_text.lower() for word in job_description.lower().split()):
    print("Resume matches job description")
else:
    print("Resume does not match well")
How to run: Save as fake_image_detection.py. Use deep learning frameworks like TensorFlow or PyTorch.
# Placeholder for deepfake detection model loading and inference.
# Typically requires a pretrained CNN plus video/image input preprocessing.
print("Fake image detection model placeholder")
How to run: Save as code_autocomplete.py. Use NLP models or libraries like Hugging Face transformers.
"""Code completion demo with a small pretrained CodeGPT model."""
from transformers import pipeline

# Text-generation pipeline over a Python-tuned GPT checkpoint.
autocomplete = pipeline("text-generation", model="microsoft/CodeGPT-small-py")

# Complete a function signature up to 50 tokens.
prompt = "def fibonacci(n):"
completion = autocomplete(prompt, max_length=50)
print(completion[0]['generated_text'])
How to run: Save as machine_translation.py. Install pip install transformers.
"""English -> French translation with a pretrained MarianMT model."""
from transformers import MarianMTModel, MarianTokenizer

MODEL_NAME = 'Helsinki-NLP/opus-mt-en-fr'
tokenizer = MarianTokenizer.from_pretrained(MODEL_NAME)
model = MarianMTModel.from_pretrained(MODEL_NAME)

# Tokenize the source sentence, generate the translation, decode back to text.
source = "Hello, how are you?"
batch = tokenizer(source, return_tensors="pt", padding=True)
output_ids = model.generate(**batch)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
How to run: Save as email_classifier.py. Install pip install scikit-learn pandas.
"""Toy email category classifier: TF-IDF + multinomial naive Bayes."""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB

# Three example emails labelled spam / promotions / primary.
emails_df = pd.DataFrame({
    'email': ["Buy now", "Meeting tomorrow", "Discount offer"],
    'label': ['promotions', 'primary', 'spam'],
})

vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(emails_df['email'])
y = emails_df['label']

# 70/30 split (tiny corpus - demo only).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

classifier = MultinomialNB()
classifier.fit(X_train, y_train)
print(classifier.predict(vectorizer.transform(["Special discount just for you"])))
How to run: Save as rl_game_bot.py. Use pip install gym numpy.
# Random-policy CartPole rollout (replace the sampled action with an RL agent).
import gym
import numpy as np

env = gym.make("CartPole-v1")
state = env.reset()
done = False
while not done:
    action = env.action_space.sample()  # random action (replace with RL agent logic)
    # Classic gym API: (obs, reward, done, info); gymnasium adds `truncated`.
    state, reward, done, info = env.step(action)
    env.render()
env.close()
How to run: Save as essay_scoring.py. Use NLP libraries like pip install textblob.
"""Crude essay scorer: spell-corrected length + sentiment polarity."""
from textblob import TextBlob

essay = "This is a well-written essay with clear points."

# Length of the spell-corrected text stands in as a rough "grammar" score;
# sentiment polarity serves as a proxy for tone/quality.
blob = TextBlob(essay)
grammar_score = len(blob.correct())
sentiment = blob.sentiment.polarity
print(f"Grammar Score: {grammar_score}, Sentiment: {sentiment}")
How to run: Save as emotion_recognition_voice.py. Install dependencies with pip install librosa scikit-learn.
"""Skeleton for voice emotion recognition from MFCC features."""
import numpy as np
import librosa
from sklearn.svm import SVC

# Load the audio file and compute per-frame MFCC features.
audio_path = "sample_audio.wav"
y, sr = librosa.load(audio_path)
mfccs = librosa.feature.mfcc(y=y, sr=sr)

X = mfccs.T  # one feature row per frame
y_labels = []  # frame emotion labels would go here (placeholder)

model = SVC()
# model.fit(X_train, y_train)  # train once a labelled dataset is available
print("Emotion recognition placeholder")
How to run: Save as medical_image_segmentation.py. Use deep learning frameworks like pip install tensorflow keras.
# Placeholder for U-Net or CNN based segmentation model loading and inference.
# A real pipeline would load a trained network and predict per-pixel masks.
print("Medical image segmentation model placeholder")
How to run: Save as qa_bert.py. Use pip install transformers torch.
"""Extractive question answering with the default HF QA pipeline."""
from transformers import pipeline

qa_pipeline = pipeline("question-answering")

# The model extracts the answer span directly from the context string.
context = "OpenAI creates powerful AI models."
question = "Who creates powerful AI models?"
answer = qa_pipeline(question=question, context=context)
print(answer['answer'])
How to run: Save as news_categorization.py. Use pip install scikit-learn pandas.
"""Two-example news topic classifier: TF-IDF + naive Bayes."""
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

headlines = ["Stocks soar in market rally", "Local team wins championship"]
topics = ["business", "sports"]

# Vectorize the training headlines and fit the classifier.
tfidf = TfidfVectorizer()
features = tfidf.fit_transform(headlines)
nb = MultinomialNB()
nb.fit(features, topics)

# Classify an unseen headline.
print(nb.predict(tfidf.transform(["Championship game tonight"])))
How to run: Save as ner_tool.py. Use pip install spacy and download model python -m spacy download en_core_web_sm.
# Named-entity recognition demo: print each entity span and its label.
import spacy

nlp = spacy.load("en_core_web_sm")
text = "Apple is looking at buying U.K. startup for $1 billion"
doc = nlp(text)
for ent in doc.ents:
    print(ent.text, ent.label_)
How to run: Save as tts_converter.py. Use pip install pyttsx3.
"""Speak a sentence aloud using the offline pyttsx3 engine."""
import pyttsx3

tts_engine = pyttsx3.init()
tts_engine.say("Hello, this is a text to speech conversion example.")
tts_engine.runAndWait()  # blocks until speech playback finishes
How to run: Save as document_clustering.py. Use pip install scikit-learn.
# Cluster a tiny corpus into 2 groups using TF-IDF features + K-Means.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans

documents = [
    "The sky is blue",
    "The sun is bright",
    "The sun in the blue sky is bright"
]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(documents)
# n_init made explicit: its default changed to 'auto' in scikit-learn 1.4,
# so pinning it keeps results comparable across library versions.
kmeans = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans.fit(X)
print(kmeans.labels_)  # cluster id assigned to each document
How to run: Save as plagiarism_checker.py.
# Pairwise document similarity via TF-IDF + cosine distance.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

text1 = "This is a sample document."
text2 = "This document is a sample."

# Fit on both texts so they share one vocabulary, then compare the row vectors.
matrix = TfidfVectorizer().fit_transform([text1, text2]).toarray()
similarity = cosine_similarity([matrix[0]], [matrix[1]])
print(f"Similarity score: {similarity[0][0]}")
How to run: Save as drone_navigation.py. Use simulation environments like AirSim or Gym.
# Placeholder for drone navigation simulation code.
# A real version would plug an RL agent or path planner into AirSim/Gym.
print("Simulate autonomous drone navigation with RL or path planning algorithms")
How to run: Save as sentiment_chatbot.py. Use pip install transformers.
# Sentiment-aware reply demo: classify the user's mood, then pick a response.
from transformers import pipeline

sentiment = pipeline("sentiment-analysis")
# NOTE: pipeline("conversational") was removed from recent transformers
# releases and its result was never used here, so it is no longer created.

user_input = "I am feeling sad today."
sentiment_result = sentiment(user_input)
print(f"Sentiment: {sentiment_result}")

# Use sentiment info to tailor chatbot response (basic example).
response = "I'm here to help you." if sentiment_result[0]['label'] == 'NEGATIVE' else "Glad to hear that!"
print(response)
How to run: Save as finance_advisor.py. Basic example using budgeting advice rules.
# Rule-based budgeting advice from monthly income and expenses.
income = 5000
expenses = 3000
savings = income - expenses  # negative means overspending
if savings < 0:
    advice = "Cut your expenses!"
else:
    advice = f"Good job! You saved ${savings} this month."
print(advice)
How to run: Save as image_colorization.py. Use deep learning frameworks and pretrained models.
# Placeholder for image colorization using CNN or GAN.
# Real flow: load a grayscale image, run a pretrained colorization net, save output.
print("Load grayscale image and apply colorization model")
How to run: Save as video_summarization.py. Use pip install opencv-python numpy.
import cv2
# Extract keyframes or use pretrained video summarization models.
# (cv2 is imported for the eventual frame-extraction implementation.)
print("Extract key frames to summarize videos")
How to run: Save as fake_review_detector.py. Use NLP models for text classification.
# Train a classifier to detect fake vs real reviews.
# Typical approach: TF-IDF or embeddings + a supervised text classifier.
print("Detect fake reviews using NLP and classification models")
How to run: Save as sign_language_translator.py. Use hand gesture recognition with CNNs.
# Use a webcam feed and a CNN to recognize signs and translate them to text.
print("Translate sign language gestures to text")
How to run: Save as real_time_interpreter.py. Use speech-to-text and translation APIs.
# Capture audio, convert speech to text, translate, then synthesize speech
# in the target language.
print("Interpret languages in real-time")
How to run: Save as equation_solver.py. Use image recognition + symbolic math libraries.
# Extract a handwritten math equation via OCR and solve it symbolically.
print("Solve handwritten equations using OCR and sympy")
How to run: Save as virtual_dressing.py. Use pose estimation + image overlay techniques.
# Overlay clothing on person images using pose detection.
print("Simulate virtual dressing with AI and computer vision")
How to run: Save as traffic_sign_recognition.py. Use CNN with traffic sign datasets.
# Train a CNN to detect and classify traffic signs in images.
print("Recognize traffic signs using deep learning")
How to run: Save as essay_generator.py. Use GPT models to generate essays from prompts.
# Generate essays from input topics using language models.
print("Create essays automatically using AI text generation")
How to run: Save as fraud_detection.py. Use anomaly detection or classification models.
# Detect fraudulent transactions from data using ML.
print("Detect fraud in financial transactions")
How to run: Save as disease_diagnosis.py. Use CNNs with medical image datasets.
# Train a CNN to classify diseases from MRI or X-ray images.
print("Diagnose diseases using medical image analysis")
Objective: Extract key information (name, skills, experience) from resumes using NLP.
How to run: Use libraries like spaCy or NLTK for parsing text.
# Parse resume text and extract entities such as name, skills, education.
print("Extract key info from resumes using NLP")
Objective: Classify songs by genre using audio features like MFCCs.
How to run: Use librosa for feature extraction and a CNN or Random Forest for classification.
# Extract audio features (e.g. MFCCs) and train a classifier to predict genre.
print("Classify music genres using audio features")
Objective: Maintain conversation context for better replies.
How to run: Implement with transformer models like GPT and track session state.
# Track conversation history and generate context-aware replies.
print("Chatbot maintaining conversation context")
Objective: Analyze social media trends, sentiments, and hashtags.
How to run: Use Twitter API, sentiment analysis libraries, and data visualization tools.
# Fetch tweets, analyze sentiment, and display trend charts.
print("Analyze social media trends and sentiments")
Objective: Convert text tone/style (formal ↔ casual).
How to run: Use transformer models fine-tuned for style transfer.
# Input text; a model converts style from formal to casual or vice versa.
print("Convert text tone or style")
Objective: Track objects frame-by-frame in videos.
How to run: Use OpenCV and pre-trained object detectors like YOLO.
# Detect and track objects in video streams (e.g. OpenCV + YOLO).
print("Track objects in video frame-by-frame")
Objective: Speech or text translation with low latency.
How to run: Use pre-trained translation models or APIs integrated with real-time speech recognition.
# Translate speech/text in real time with minimal delay.
print("Real-time language translation")
Objective: Classic digit recognition task using CNNs.
How to run: Use Keras or PyTorch on the MNIST dataset.
# Train a CNN to classify handwritten digits from the MNIST dataset.
print("Recognize handwritten digits with CNN")
Objective: Identify people from camera feed.
How to run: Use OpenCV, face_recognition library, and camera input.
# Detect and recognize faces in real-time video streams.
print("Face recognition for security")
Objective: Generate short summaries of long documents.
How to run: Use transformers like BART or T5 fine-tuned for summarization.
# Input a document, output a concise summary (e.g. fine-tuned BART/T5).
print("Summarize long documents automatically")
Objective: Answer questions about images.
How to run: Use models combining CNN and transformers for vision and language.
# Given an image and a question, a vision-language model provides the answer.
print("Answer questions about images")
Objective: Detect fraudulent transactions or activities.
How to run: Use anomaly detection or supervised models on transaction datasets.
# Analyze transaction data to flag fraud.
print("Detect fraudulent activities")
Objective: Adapt learning content based on student performance.
How to run: Use user data to customize lessons and quizzes dynamically.
# Personalize learning paths using AI based on student performance data.
print("Adaptive learning platform")
Objective: Detect anomalies in medical images like X-rays or MRIs.
How to run: Use CNNs trained on labeled medical imaging datasets.
# Classify medical images for diagnosis support.
print("Diagnose medical images with AI")
Objective: Classify emails as spam or not using ML.
How to run: Train classifiers on labeled email datasets using features or embeddings.
# Filter spam emails automatically with a trained classifier.
print("Email spam filtering with AI")
# AI-Based Speech Emotion Recognition - sample snippet (dummy prediction).
import librosa
import numpy as np
from sklearn.svm import SVC

# Load the speech recording at its native sample rate.
audio_path = 'speech.wav'
audio, sr = librosa.load(audio_path, sr=None)

# 13 MFCCs per frame, averaged over time into one fixed-length feature vector.
mfccs = librosa.feature.mfcc(y=audio, sr=sr, n_mfcc=13)
features = np.mean(mfccs.T, axis=0).reshape(1, -1)

# A real system would load a pre-trained classifier here.
model = SVC()
# prediction = model.predict(features)
print("Emotion detected: Happy (dummy output)")
How to run: Save this Python script and run in a Python environment with librosa and sklearn installed.
# AI-Powered Photo Style Transfer - simplified example using TensorFlow Hub's
# pre-trained arbitrary-image-stylization model.
import tensorflow_hub as hub
import tensorflow as tf
import numpy as np
from PIL import Image

# Load content and style images from the working directory.
content_image = Image.open('content.jpg')
style_image = Image.open('style.jpg')

def load_img(img):
    """Convert a PIL image to a float32 tensor with a leading batch axis."""
    img = tf.image.convert_image_dtype(np.array(img), tf.float32)
    img = img[tf.newaxis, :]
    return img

content = load_img(content_image)
style = load_img(style_image)

# Load style transfer model from TF Hub (downloads on first run).
hub_model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')

# Stylize: the model returns a sequence; element 0 is the stylized image tensor.
stylized_image = hub_model(tf.constant(content), tf.constant(style))[0]
print("Style transfer complete (output tensor)")
How to run: Requires TensorFlow and tensorflow_hub packages installed. Run the script in Python environment.
# AI-Powered Automated Code Review Tool - conceptual snippet.
# Simulates a code-quality check by shelling out to pylint.
import subprocess

code_file = 'example.py'  # file to check
# List-form argv (shell=False) avoids shell-injection pitfalls.
proc = subprocess.run(['pylint', code_file], capture_output=True, text=True)
print("Pylint output:")
print(proc.stdout)
How to run: Install pylint (`pip install pylint`), save code file, then run this script in the same directory.
# AI-Powered Medical Diagnosis Assistant - basic sklearn example.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

# Each row encodes symptom presence (1) or absence (0) for three symptoms.
X = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 1], [0, 0, 1]])
y = np.array([1, 0, 1, 0])  # 1 = disease present, 0 = absent

clf = RandomForestClassifier()
clf.fit(X, y)

# Classify a new patient's symptom vector.
new_patient = np.array([[1, 0, 0]])
prediction = clf.predict(new_patient)
print("Disease present" if prediction[0] == 1 else "Disease absent")
How to run: Requires sklearn installed (`pip install scikit-learn`), then run in Python environment.
# AI-Based Real-time Traffic Monitoring - simplified OpenCV display loop.
import cv2

# Open video capture (camera index or video file path).
cap = cv2.VideoCapture('traffic_video.mp4')
while True:
    ret, frame = cap.read()
    if not ret:  # end of file or read failure
        break
    # Placeholder for traffic detection logic (vehicle detection/counting).
    cv2.putText(frame, "Traffic Monitoring Running...", (50,50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)
    cv2.imshow('Traffic Monitor', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break
cap.release()
cv2.destroyAllWindows()
How to run: Requires OpenCV installed (`pip install opencv-python`). Run in Python environment with video file or camera access.
# AI-Powered Email Spam Classifier - bag-of-words + naive Bayes demo.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# Tiny labelled corpus: 1 = spam, 0 = not spam.
emails = ["Win a free iPhone now", "Meeting schedule at 10 AM", "Lowest prices guaranteed"]
labels = [1, 0, 1]

# Token-count features.
bow = CountVectorizer()
X = bow.fit_transform(emails)

nb = MultinomialNB()
nb.fit(X, labels)

# Score an unseen email with the same vocabulary.
test_email = ["Limited offer just for you"]
verdict = nb.predict(bow.transform(test_email))
print("Spam" if verdict[0] == 1 else "Not Spam")
How to run: Requires sklearn installed. Run in Python environment.
# AI-Powered Virtual Makeup Try-On - Conceptual placeholder
# No implementation yet; a real version would detect facial landmarks
# (OpenCV + Dlib/Mediapipe) and blend makeup layers onto the face image.
notice = "Virtual makeup try-on functionality to be implemented using facial landmarks and image processing libraries."
print(notice)
How to run: This is a conceptual demo; full implementation requires OpenCV and Dlib or Mediapipe for facial landmark detection.
# AI-Based Sentiment Analysis for Product Reviews - example with TextBlob
from textblob import TextBlob

# Score a sample review; polarity > 0 is labelled positive,
# everything else (including neutral 0) negative.
sample_review = "This product is amazing and works perfectly!"
blob = TextBlob(sample_review)
label = "Positive" if blob.sentiment.polarity > 0 else "Negative"
print(f"Review sentiment: {label}")
How to run: Requires TextBlob installed (`pip install textblob`). Run in Python environment.
# AI-Based Text Generation for Story Writing - example using GPT-2 via transformers
from transformers import pipeline

# Build a GPT-2 text-generation pipeline and continue a story prompt.
story_writer = pipeline('text-generation', model='gpt2')
opening = "Once upon a time in a distant kingdom,"
completions = story_writer(opening, max_length=50, num_return_sequences=1)
print(completions[0]['generated_text'])
# AI-Powered Optical Flow Estimation - OpenCV example
import cv2

cap = cv2.VideoCapture('video.mp4')
ret, prev_frame = cap.read()
# Bug fix: if the video cannot be opened or is empty, the original passed
# None into cvtColor and crashed. Bail out cleanly instead.
if not ret:
    print("Could not read from video.mp4")
    cap.release()
else:
    prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Dense Farneback optical flow between consecutive grayscale frames.
        flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)
        # Visualization or processing code here
        prev_gray = gray
        cv2.imshow('Optical Flow', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
How to run: Requires OpenCV installed (`pip install opencv-python`). Run in Python environment with video file.
# AI-Based Crop Disease Detection - conceptual example using Keras
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np

# Load a pretrained binary classifier (crop_disease_model.h5 must exist).
model = load_model('crop_disease_model.h5')

# Preprocess the leaf photo: resize to the network's 224x224 input,
# add a batch axis, and scale pixel values into [0, 1].
img_path = 'leaf.jpg'
leaf = image.load_img(img_path, target_size=(224, 224))
batch = np.expand_dims(image.img_to_array(leaf), axis=0) / 255.0

# Output above 0.5 is interpreted as "Diseased".
disease = "Diseased" if model.predict(batch)[0][0] > 0.5 else "Healthy"
print(f"Crop status: {disease}")
How to run: Requires TensorFlow installed, and a trained model file.
# AI-Powered Autonomous Vehicle Simulation - conceptual reinforcement learning setup
import gym

# Drive the CarRacing environment with random actions until the episode ends.
env = gym.make('CarRacing-v0')
observation = env.reset()
done = False
while not done:
    # Random action (replace with RL agent)
    random_action = env.action_space.sample()
    observation, reward, done, info = env.step(random_action)
    env.render()
env.close()
How to run: Requires OpenAI Gym installed (`pip install gym[box2d]`). Run in Python environment.
# AI-Powered Image Super Resolution - using OpenCV DNN Super Resolution module
import cv2

# Load the pretrained ESPCN x4 model into OpenCV's super-resolution engine.
upscaler = cv2.dnn_superres.DnnSuperResImpl_create()
upscaler.readModel('ESPCN_x4.pb')
upscaler.setModel('espcn', 4)

# Upscale the input image 4x and write the result to disk.
low_res = cv2.imread('low_res.jpg')
high_res = upscaler.upsample(low_res)
cv2.imwrite('super_res.jpg', high_res)
print("Super resolution image saved")
How to run: Requires OpenCV-contrib-python installed (`pip install opencv-contrib-python`).
# AI-Based Hand Pose Estimation - using MediaPipe
import cv2
import mediapipe as mp

# MediaPipe hand detector plus its landmark-drawing helper.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands()
mp_draw = mp.solutions.drawing_utils

# Stream from the default webcam until capture fails or 'q' is pressed.
cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    # MediaPipe expects RGB input; OpenCV captures BGR.
    results = hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    # Overlay the detected hand skeletons on the original frame.
    for hand_landmarks in (results.multi_hand_landmarks or []):
        mp_draw.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
    cv2.imshow("Hand Pose Estimation", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
How to run: Requires MediaPipe and OpenCV installed (`pip install mediapipe opencv-python`).
# AI-Powered Chatbot for Mental Health Support - simple example using transformers
from transformers import pipeline, Conversation

# Bug fix: the conversational pipeline expects a Conversation object, not a
# raw string; passing a plain str raises an error in transformers.
chatbot = pipeline('conversational')
user_input = "I feel anxious lately."
conversation = chatbot(Conversation(user_input))
# The model's latest reply is the last generated response in the conversation.
print(f"Chatbot response: {conversation.generated_responses[-1]}")
How to run: Requires transformers installed (`pip install transformers`). Run in Python environment.
This is a simple Django portfolio site to showcase your projects with images, descriptions, and links.
# settings.py (Add 'portfolio' app)
# Registering the app lets Django discover its models, templates, and admin.
INSTALLED_APPS = [
    # ... other apps
    'portfolio',
]
# portfolio/models.py
from django.db import models


class Project(models.Model):
    """A single portfolio entry shown on the home page."""
    title = models.CharField(max_length=100)          # short display name
    description = models.TextField()                  # free-form details
    image = models.ImageField(upload_to='projects/')  # Requires Pillow
    url = models.URLField(blank=True)                 # optional external link
    def __str__(self):
        # Human-readable label used in the admin and shell.
        return self.title
# portfolio/views.py
from django.shortcuts import render
from .models import Project
def home(request):
    """Render the portfolio home page with every Project."""
    context = {'projects': Project.objects.all()}
    return render(request, 'portfolio/home.html', context)
# portfolio/templates/portfolio/home.html
<!DOCTYPE html>
<html>
<head><title>My Portfolio</title></head>
<body>
<h1>My Projects</h1>
<!-- Render one card per Project supplied by the home view -->
{% for project in projects %}
<div>
<h2>{{ project.title }}</h2>
<!-- image.url requires MEDIA_URL/MEDIA_ROOT to be configured -->
<img src="{{ project.image.url }}" alt="{{ project.title }}" width="300" />
<p>{{ project.description }}</p>
{% if project.url %}
<a href="{{ project.url }}" target="_blank">View Project</a>
{% endif %}
</div>
{% endfor %}
</body>
</html>
# portfolio/urls.py
from django.urls import path
from . import views

urlpatterns = [
    path('', views.home, name='home'),  # app root -> portfolio home page
]
Prerequisites: Python 3 (`python3 --version`), Django (`pip install django`), and Pillow (`pip install pillow`). Update urls.py and settings.py for serving images in development, then run `python manage.py makemigrations` and `python manage.py migrate`.
# Create the Django project and the portfolio app
django-admin startproject myportfolio
cd myportfolio
python manage.py startapp portfolio
Add 'portfolio' to INSTALLED_APPS in myportfolio/settings.py:
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'portfolio',  # our portfolio app
]
Define the Project model in portfolio/models.py:
from django.db import models


class Project(models.Model):
    """Portfolio project shown on the home page."""
    title = models.CharField(max_length=100)
    description = models.TextField()
    image = models.ImageField(upload_to='projects/')  # requires Pillow
    url = models.URLField(blank=True)  # optional link to the live project
    def __str__(self):
        # Display name in the admin and shell.
        return self.title
# Create the database tables and an admin account
python manage.py makemigrations
python manage.py migrate
python manage.py createsuperuser
Register the model in portfolio/admin.py for admin access:
from django.contrib import admin
from .models import Project

admin.site.register(Project)  # manage Project objects in the admin site
Update portfolio/views.py to show projects:
from django.shortcuts import render
from .models import Project

def home(request):
    # Fetch every project and hand the list to the template.
    projects = Project.objects.all()
    return render(request, 'portfolio/home.html', {'projects': projects})
Create the template portfolio/templates/portfolio/home.html:
<!DOCTYPE html>
<html>
<head><title>My Portfolio</title></head>
<body>
<h1>My Projects</h1>
<!-- One block per Project from the home view's context -->
{% for project in projects %}
<div>
<h2>{{ project.title }}</h2>
<!-- image.url needs MEDIA_URL/MEDIA_ROOT configured -->
<img src="{{ project.image.url }}" alt="{{ project.title }}" width="300" />
<p>{{ project.description }}</p>
{% if project.url %}
<a href="{{ project.url }}" target="_blank">View Project</a>
{% endif %}
</div>
{% endfor %}
</body>
</html>
Create portfolio/urls.py:
from django.urls import path
from . import views

urlpatterns = [
    path('', views.home, name='home'),  # app root -> home view
]
Update myportfolio/urls.py to include the app URLs and serve media files:
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('portfolio.urls')),
]
# Serve uploaded media through Django only during development (DEBUG=True).
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
Add media settings to myportfolio/settings.py:
import os

MEDIA_URL = '/media/'                         # URL prefix for uploaded files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')  # where uploads live on disk
python manage.py runserver
Open http://127.0.0.1:8000/ in your browser to see your portfolio site.
Visit http://127.0.0.1:8000/admin/ to add projects with images, or use the Django shell.
That’s it! You have a simple portfolio website built with Django that displays projects with images and links. You can customize the templates and styling further.
# Create and setup Django project and app
# Terminal commands (not in pre tag, just instructions):
# django-admin startproject myblogproject
# cd myblogproject
# python manage.py startapp blog
# python -m venv env (optional but recommended)
# source env/bin/activate (Linux/Mac) or env\Scripts\activate (Windows)
# pip install django
# settings.py adjustments:
# Add 'blog' to INSTALLED_APPS
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog', # Add blog app here
]
# models.py inside blog app
from django.db import models
from django.contrib.auth.models import User


class Post(models.Model):
    """A blog entry written by a site user."""
    title = models.CharField(max_length=200)  # Post title
    slug = models.SlugField(unique=True)  # URL-friendly unique slug
    content = models.TextField()  # Post content
    published_date = models.DateTimeField(auto_now_add=True)  # Date published
    author = models.ForeignKey(User, on_delete=models.CASCADE)  # Author of the post
    def __str__(self):
        # Admin/shell display name.
        return self.title
    class Meta:
        ordering = ['-published_date']  # Latest posts first
# admin.py inside blog app
from django.contrib import admin
from .models import Post


@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin list/search configuration for blog posts."""
    list_display = ('title', 'author', 'published_date')  # Fields to display in admin list
    prepopulated_fields = {'slug': ('title',)}  # Auto-fill slug from title
    search_fields = ('title', 'content')  # Searchable fields
# urls.py inside myblogproject folder (main project)
from django.contrib import admin
from django.urls import path, include

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('blog.urls')),  # Include blog app urls
]
# urls.py inside blog app folder
from django.urls import path
from . import views

urlpatterns = [
    path('', views.post_list, name='post_list'),  # List all posts
    path('post/<slug:slug>/', views.post_detail, name='post_detail'),  # Post detail by slug
]
# views.py inside blog app
from django.shortcuts import render, get_object_or_404
from .models import Post
def post_list(request):
    """Render the index page listing every blog post."""
    context = {'posts': Post.objects.all()}
    return render(request, 'blog/post_list.html', context)
def post_detail(request, slug):
    """Render a single post looked up by its slug (404 if absent)."""
    return render(request, 'blog/post_detail.html',
                  {'post': get_object_or_404(Post, slug=slug)})
# templates/blog/post_list.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Blog Posts</title>
</head>
<body>
<h1>Blog Posts</h1>
<ul>
{% raw %}
<!-- Loop through posts -->
{% for post in posts %}
<li>
<!-- Link to the detail page via the post's slug -->
<a href="{% url 'post_detail' post.slug %}">{{ post.title }}</a>
<small>by {{ post.author.username }} on {{ post.published_date|date:"F j, Y" }}</small>
</li>
{% endfor %}
{% endraw %}
</ul>
</body>
</html>
# templates/blog/post_detail.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{{ post.title }}</title>
</head>
<body>
<h1>{{ post.title }}</h1>
<!-- Byline: author and formatted publish date -->
<p>By {{ post.author.username }} on {{ post.published_date|date:"F j, Y" }}</p>
<div>{{ post.content }}</div>
<a href="{% url 'post_list' %}">Back to Posts</a>
</body>
</html>
# Running instructions:
# 1. Create and activate a virtual environment.
# 2. Install Django: pip install django
# 3. Create project and app, as above.
# 4. Add blog app to INSTALLED_APPS.
# 5. Run migrations: python manage.py migrate
# 6. Create superuser: python manage.py createsuperuser
# 7. Run server: python manage.py runserver
# 8. Access admin panel at http://127.0.0.1:8000/admin/ to add blog posts.
# 9. Access blog at http://127.0.0.1:8000/ to see posts list and details.
# models.py in urlshortener app
from django.db import models
import string, random
def generate_short_code():
    """Return a random 6-character alphanumeric short code.

    Uses random.SystemRandom (OS entropy) instead of the default Mersenne
    Twister PRNG so public short codes are not predictable from earlier ones.
    """
    length = 6
    chars = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(length))
class URL(models.Model):
    """Mapping from a generated short code to its original URL."""
    original_url = models.URLField()
    # NOTE(review): default=generate_short_code could collide with an existing
    # code; unique=True would then raise IntegrityError on save — confirm
    # whether retry logic is needed at scale.
    short_code = models.CharField(max_length=6, unique=True, default=generate_short_code)
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f"{self.original_url} - {self.short_code}"
# views.py
from django.shortcuts import render, redirect, get_object_or_404
from .models import URL
from django.http import HttpResponseNotFound
def home(request):
    """Shorten a submitted URL (POST) or show the entry form (GET)."""
    if request.method == 'POST':
        original_url = request.POST.get('original_url')
        # Reuse the existing row when this URL was already shortened.
        url_obj, created = URL.objects.get_or_create(original_url=original_url)
        return render(request, 'urlshortener/home.html', {'short_url': request.build_absolute_uri('/') + url_obj.short_code})
    return render(request, 'urlshortener/home.html')
def redirect_url(request, short_code):
    """Resolve a short code and redirect to its stored URL (404 if unknown)."""
    target = get_object_or_404(URL, short_code=short_code)
    return redirect(target.original_url)
# urls.py in urlshortener app
from django.urls import path
from . import views

urlpatterns = [
    path('', views.home, name='home'),  # form page / shorten endpoint
    path('<str:short_code>/', views.redirect_url, name='redirect_url'),  # code lookup + redirect
]
# templates/urlshortener/home.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>URL Shortener</title>
</head>
<body>
<h1>URL Shortener</h1>
<!-- Submit a URL back to the same home view -->
<form method="post">
{% raw %}{% csrf_token %}{% endraw %}
<input type="url" name="original_url" placeholder="Enter URL" required>
<button type="submit">Shorten</button>
</form>
<!-- Shown only after a successful POST sets short_url -->
{% raw %}{% if short_url %}{% endraw %}
<p>Shortened URL: <a href="{{ short_url }}">{{ short_url }}</a></p>
{% raw %}{% endif %}{% endraw %}
</body>
</html>
---
2. Contact Form Email Sender
# settings.py (add email config)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# SECURITY NOTE: never commit real credentials — load these from environment
# variables or a secrets manager instead of hardcoding.
EMAIL_HOST_USER = 'your_email@gmail.com'  # Replace with your email
EMAIL_HOST_PASSWORD = 'your_email_password'  # Replace with your email password or app password
# views.py in contact app
from django.core.mail import send_mail
from django.shortcuts import render
from django.conf import settings


def contact(request):
    """Show the contact form; on POST, email the message to the site owner."""
    message_sent = False
    if request.method == 'POST':
        name = request.POST.get('name')
        email = request.POST.get('email')
        message = request.POST.get('message')
        # Visitor's address is embedded in the body; the mail itself is both
        # sent from and delivered to the configured account.
        full_message = f"Message from {name} ({email}):\n\n{message}"
        send_mail(
            subject='Contact Form Message',
            message=full_message,
            from_email=settings.EMAIL_HOST_USER,
            recipient_list=[settings.EMAIL_HOST_USER],
        )
        message_sent = True
    return render(request, 'contact/contact.html', {'message_sent': message_sent})
# urls.py in contact app
from django.urls import path
from . import views

urlpatterns = [
    path('', views.contact, name='contact'), # Root URL to contact view
]
# templates/contact/contact.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Contact Us</title>
</head>
<body>
<h1>Contact Us</h1>
<!-- Success banner, shown after the view sets message_sent -->
{% raw %}{% if message_sent %}{% endraw %}
<p style="color:green;">Your message has been sent successfully!</p>
{% raw %}{% endif %}{% endraw %}
<form method="post">
{% raw %}{% csrf_token %}{% endraw %}
<input type="text" name="name" placeholder="Your Name" required><br>
<input type="email" name="email" placeholder="Your Email" required><br>
<textarea name="message" placeholder="Your Message" required></textarea><br>
<button type="submit">Send</button>
</form>
</body>
</html>
---
3. Expense Tracker with Django & Bootstrap
# models.py in expense app
from django.db import models


class Expense(models.Model):
    """A single spend record tracked by the app."""
    title = models.CharField(max_length=100)
    amount = models.DecimalField(max_digits=10, decimal_places=2)  # money, 2 decimal places
    date = models.DateField()
    category = models.CharField(max_length=50)
    def __str__(self):
        return f"{self.title} - {self.amount}"
# views.py
from django.shortcuts import render, redirect
from .models import Expense
from django.db.models import Sum
def expense_list(request):
    """List expenses newest-first together with their grand total."""
    expenses = Expense.objects.all().order_by('-date')
    # aggregate returns None on an empty table; show 0 instead.
    grand_total = expenses.aggregate(Sum('amount'))['amount__sum'] or 0
    context = {'expenses': expenses, 'total': grand_total}
    return render(request, 'expense/expense_list.html', context)
def add_expense(request):
    """Show the add-expense form; create the Expense on POST and go back to the list."""
    if request.method == 'POST':
        title = request.POST.get('title')
        amount = request.POST.get('amount')
        date = request.POST.get('date')
        category = request.POST.get('category')
        # NOTE(review): form values are passed through unvalidated; a Django
        # ModelForm would validate amount/date — confirm intent.
        Expense.objects.create(title=title, amount=amount, date=date, category=category)
        return redirect('expense_list')
    return render(request, 'expense/add_expense.html')
# urls.py in expense app
from django.urls import path
from . import views

urlpatterns = [
    path('', views.expense_list, name='expense_list'),  # list + total
    path('add/', views.add_expense, name='add_expense'),  # create form
]
# templates/expense/expense_list.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Expense Tracker</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css" rel="stylesheet">
</head>
<body class="container mt-4">
<h1>Expense Tracker</h1>
<a href="{% raw %}{% url 'add_expense' %}{% endraw %}" class="btn btn-primary mb-3">Add Expense</a>
<table class="table table-bordered">
<thead>
<tr>
<th>Title</th>
<th>Amount</th>
<th>Category</th>
<th>Date</th>
</tr>
</thead>
<tbody>
<!-- One row per expense from the expense_list view -->
{% raw %}{% for expense in expenses %}{% endraw %}
<tr>
<td>{{ expense.title }}</td>
<td>${{ expense.amount }}</td>
<td>{{ expense.category }}</td>
<td>{{ expense.date }}</td>
</tr>
{% raw %}{% endfor %}{% endraw %}
</tbody>
</table>
<!-- Grand total computed server-side by the view -->
<h3>Total Expense: ${{ total }}</h3>
</body>
</html>
# templates/expense/add_expense.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Add Expense</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css" rel="stylesheet">
</head>
<body class="container mt-4">
<h1>Add Expense</h1>
<!-- Posts back to the add_expense view, which creates the Expense -->
<form method="post">
{% raw %}{% csrf_token %}{% endraw %}
<div class="mb-3">
<label class="form-label">Title</label>
<input type="text" name="title" class="form-control" required>
</div>
<div class="mb-3">
<label class="form-label">Amount</label>
<input type="number" step="0.01" name="amount" class="form-control" required>
</div>
<div class="mb-3">
<label class="form-label">Category</label>
<input type="text" name="category" class="form-control" required>
</div>
<div class="mb-3">
<label class="form-label">Date</label>
<input type="date" name="date" class="form-control" required>
</div>
<button type="submit" class="btn btn-success">Add</button>
</form>
</body>
</html>
# settings.py (Add your email SMTP configuration)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # Use SMTP backend
EMAIL_HOST = 'smtp.gmail.com' # Gmail SMTP server
EMAIL_PORT = 587 # TLS port
EMAIL_USE_TLS = True # Use TLS encryption
# SECURITY NOTE: keep real credentials out of source control; load them from
# environment variables instead of hardcoding them here.
EMAIL_HOST_USER = 'your_email@gmail.com' # Your Gmail address
EMAIL_HOST_PASSWORD = 'your_email_password_or_app_password' # Your Gmail password or app password
---
# views.py in your Django app (e.g., contact)
from django.core.mail import send_mail
from django.shortcuts import render
from django.conf import settings
def contact(request):
    """Render the contact page and email the owner when the form is posted."""
    sent = False
    if request.method == 'POST':
        form = request.POST
        body = (f"Message from {form.get('name')} ({form.get('email')}):"
                f"\n\n{form.get('message')}")
        send_mail(
            subject='Contact Form Message',
            message=body,
            from_email=settings.EMAIL_HOST_USER,
            recipient_list=[settings.EMAIL_HOST_USER],
        )
        sent = True
    return render(request, 'contact/contact.html', {'message_sent': sent})
---
# urls.py in your Django app
from django.urls import path
from . import views

urlpatterns = [
    path('', views.contact, name='contact'), # Root URL to contact view
]
---
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Contact Us</title>
<!-- Minimal inline styling for the form and success banner -->
<style>
body { font-family: Arial, sans-serif; margin: 20px; }
form { max-width: 400px; }
input, textarea { width: 100%; padding: 8px; margin: 8px 0; box-sizing: border-box; }
button { padding: 10px 15px; background-color: #4CAF50; color: white; border: none; cursor: pointer; }
button:hover { background-color: #45a049; }
.success { color: green; }
</style>
</head>
<body>
<h1>Contact Us</h1>
<!-- Shown after the contact view sets message_sent -->
{% raw %}{% if message_sent %}{% endraw %}
<p class="success">Your message has been sent successfully!</p>
{% raw %}{% endif %}{% endraw %}
<form method="post">
{% raw %}{% csrf_token %}{% endraw %}
<label for="name">Name:</label>
<input type="text" id="name" name="name" placeholder="Your name" required>
<label for="email">Email:</label>
<input type="email" id="email" name="email" placeholder="Your email" required>
<label for="message">Message:</label>
<textarea id="message" name="message" placeholder="Your message" rows="5" required></textarea>
<button type="submit">Send</button>
</form>
</body>
</html>