import numpy as np
class LinearRegression:
    """Ordinary least squares regression fitted by batch gradient descent.

    ``fit`` prepends a column of ones to the input, so the intercept is
    learned as ``weights[0]``.  ``predict`` therefore expects a matrix that
    already carries that leading ones column (the same layout ``fit`` builds
    internally).
    """

    def __init__(self, learning_rate=0.01, num_iterations=1000):
        self.learning_rate = learning_rate    # gradient-descent step size
        self.num_iterations = num_iterations  # number of full-batch passes
        self.weights = None  # (n_features + 1,) after fit; weights[0] is the intercept
        self.bias = None     # kept for interface compatibility; always 0.0 after fit

    def fit(self, X, y):
        """Fit the model with batch gradient descent.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Raw feature matrix (WITHOUT a ones column; one is added here).
        y : array-like, shape (n_samples,)
            Target values.
        """
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float)
        # Prepend a ones column so the intercept is learned as weights[0].
        X = np.column_stack((np.ones(len(X)), X))
        self.weights = np.zeros(X.shape[1])
        # BUGFIX: the original kept a separate ``bias`` that received exactly
        # the same gradient as the ones-column weight every step, silently
        # doubling the effective learning rate of the intercept.  The
        # intercept now lives only in weights[0]; bias stays 0.0 so
        # ``predict`` keeps its original form.
        self.bias = 0.0
        # Hoist the loop-invariant scale factor lr * (1/n) out of the loop.
        step = self.learning_rate / len(X)
        for _ in range(self.num_iterations):
            errors = self.predict(X) - y          # shape (n_samples,)
            self.weights -= step * X.T.dot(errors)  # gradient of MSE/2 w.r.t. weights

    def predict(self, X):
        """Return predictions for ``X``.

        ``X`` must include the leading ones column in its first position
        (matching the matrix ``fit`` constructs internally).

        Raises
        ------
        RuntimeError
            If called before ``fit``.
        """
        if self.weights is None or self.bias is None:
            raise RuntimeError("Model not trained. Call fit() first.")
        return np.dot(X, self.weights) + self.bias
# Example usage on a small one-feature dataset.
X = np.arange(1, 6).reshape(-1, 1)  # features 1..5 as a column vector
y = np.array([2, 4, 5, 4, 5])       # observed targets

# Train a model with the default hyperparameters.
model = LinearRegression()
model.fit(X, y)

# Score three unseen points; predict() expects the leading ones column,
# so build the augmented design matrix explicitly.
new_data = np.arange(6, 9)[:, np.newaxis]
design = np.hstack((np.ones((len(new_data), 1)), new_data))
predictions = model.predict(design)
print("Predictions:", predictions)