# int vs float — two distinct number types
# They look similar. They behave differently.

# int — whole number, no decimal point
a = 10
# float — has a decimal point
b = 10.0

print(type(a))  # <class 'int'>
print(type(b))  # <class 'float'>

# Same value — different type
print(a == b)   # True — same value
print(type(a) == type(b))  # False — different type
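
# isinstance() is the usual way to check types in real code
print(isinstance(a, int))    # True
print(isinstance(b, float))  # True
print(isinstance(b, int))    # False, a float is not an int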

# True division (/) always returns float
x = 7
y = 2

print(x / y)   # 3.5  — float, always
print(x // y)  # 3    — int, floor division
print(x % y)   # 1    — int, remainder
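
# Floor division rounds toward negative infinity, so negatives
# behave differently from simple truncation
print(-7 // 2)       # -4, floors down, not toward zero
print(-7 % 2)        # 1, the remainder takes the divisor's sign
print(divmod(7, 2))  # (3, 1), quotient and remainder in one call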

# Even when the result is whole
print(10 / 2)         # 5.0 — not 5
print(type(10 / 2))   # <class 'float'>
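
# Floor division follows the operand types: give it a float and
# you get a whole-valued float back
print(7.0 // 2)        # 3.0, float in, float out
print(type(7.0 // 2))  # <class 'float'>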

# Converting between types
a = 3.9
print(int(a))    # 3 — truncates, does NOT round

b = 5
print(float(b))  # 5.0
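
# Both constructors also parse strings, though int() will not
# accept a decimal string directly
print(int("10"))          # 10
print(float("3.9"))       # 3.9
print(int(float("3.9")))  # 3, int("3.9") would raise ValueError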

# int() always truncates — never rounds
print(int(3.1))  # 3
print(int(3.9))  # 3 — still 3, not 4
print(int(-3.9)) # -3 — toward zero, not -4
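
# When you actually want rounding, use round() or the math
# module instead of int()
import math
print(round(3.9))        # 4, nearest integer
print(round(2.5))        # 2, ties go to the nearest even number
print(math.floor(-3.9))  # -4, always down
print(math.ceil(3.1))    # 4, always up
print(math.trunc(-3.9))  # -3, toward zero, same as int()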

# Mixing int and float — Python promotes the result to float
print(5 + 2.0)        # 7.0 — not 7
print(type(5 + 2.0))  # <class 'float'>
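
# Promotion happens in place too: one float operation turns an
# int variable into a float
n = 5
n += 0.5
print(n, type(n))  # 5.5 <class 'float'>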

# Float precision — a known quirk
print(0.1 + 0.2)      # 0.30000000000000004 — not exactly 0.3
print(0.1 + 0.2 == 0.3)  # False — floating point precision
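
# To compare floats safely, test with a tolerance instead of ==
import math
print(math.isclose(0.1 + 0.2, 0.3))  # True, close enough

# For exact decimal arithmetic (e.g. money), use the decimal module
from decimal import Decimal
print(Decimal("0.1") + Decimal("0.2"))  # 0.3, exact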
