pathlib is the modern way to handle file paths in Python. Here's why you should use it.

The Old Way

import os
 
# Building paths
path = os.path.join("data", "users", "config.json")
 
# Getting parts
dirname = os.path.dirname(path)
basename = os.path.basename(path)
ext = os.path.splitext(path)[1]
 
# Checking existence
if os.path.exists(path):
    if os.path.isfile(path):
        print("It's a file")

The New Way

from pathlib import Path
 
# Building paths
path = Path("data") / "users" / "config.json"
 
# Getting parts
dirname = path.parent
basename = path.name
ext = path.suffix
 
# Checking existence
if path.exists():
    if path.is_file():
        print("It's a file")

Why pathlib Is Better

1. Object-oriented

path = Path("data/file.txt")
 
# Methods on the object
path.exists()
path.is_file()
path.read_text()
path.write_text("content")

2. Readable path building

# os.path - nested function calls
path = os.path.join(os.path.dirname(__file__), "data", "config.json")
 
# pathlib - operator chaining
path = Path(__file__).parent / "data" / "config.json"

3. Cross-platform by default

# Always correct for the OS
path = Path("data") / "subdir" / "file.txt"
# Windows: data\subdir\file.txt
# Unix: data/subdir/file.txt

Common Operations

Reading and writing

path = Path("data.txt")
 
# Read
content = path.read_text()
bytes_content = path.read_bytes()
 
# Write
path.write_text("Hello")
path.write_bytes(b"Hello")

Path parts

path = Path("/home/user/data/file.txt")
 
path.parent      # /home/user/data
path.name        # file.txt
path.stem        # file
path.suffix      # .txt
path.suffixes    # ['.txt'] (for file.tar.gz: ['.tar', '.gz'])
path.parts       # ('/', 'home', 'user', 'data', 'file.txt')

Modifying paths

path = Path("data/file.txt")
 
# Change extension
new_path = path.with_suffix(".json")  # data/file.json
 
# Change name
new_path = path.with_name("other.txt")  # data/other.txt
 
# Change stem (with_stem requires Python 3.9+)
new_path = path.with_stem("new")  # data/new.txt

Finding files

path = Path("src")
 
# All Python files
for py_file in path.glob("*.py"):
    print(py_file)
 
# Recursive
for py_file in path.rglob("*.py"):
    print(py_file)
 
# Pattern matching
list(path.glob("test_*.py"))

Directory operations

path = Path("new_dir")
 
# Create directory
path.mkdir(exist_ok=True)
path.mkdir(parents=True, exist_ok=True)
 
# List contents
for item in path.iterdir():
    print(item)
 
# Remove
path.rmdir()  # Must be empty

File operations

path = Path("file.txt")
 
# Delete (missing_ok requires Python 3.8+)
path.unlink(missing_ok=True)
 
# Rename/move
path.rename("new_name.txt")
 
# Copy (need shutil)
import shutil
shutil.copy(path, "destination.txt")

Comparison Table

| Operation      | os.path                    | pathlib          |
|----------------|----------------------------|------------------|
| Join paths     | `os.path.join(a, b)`       | `Path(a) / b`    |
| Get parent     | `os.path.dirname(p)`       | `path.parent`    |
| Get filename   | `os.path.basename(p)`      | `path.name`      |
| Get extension  | `os.path.splitext(p)[1]`   | `path.suffix`    |
| Check exists   | `os.path.exists(p)`        | `path.exists()`  |
| Is file        | `os.path.isfile(p)`        | `path.is_file()` |
| Is directory   | `os.path.isdir(p)`         | `path.is_dir()`  |
| Absolute path  | `os.path.abspath(p)`       | `path.resolve()` |
| Read file      | `open(p).read()`           | `path.read_text()` |

Migration Tips

# Convert string to Path
path = Path("/some/path")
 
# Convert Path to string (when needed)
str_path = str(path)
 
# Most functions accept Path objects now
with open(path) as f:
    content = f.read()

My Rules

  1. Use pathlib for new code — it's cleaner
  2. Use / operator — not string concatenation
  3. Use resolve() — for absolute paths
  4. Use rglob() — for recursive file finding
  5. Convert to str — only when library requires it

pathlib is Python's modern path handling. Use it.

React to this post: