The Simplest Logistic Regression
A minimal from-scratch logistic-regression implementation in NumPy: initialization, forward/backward propagation, gradient descent, and prediction.
2018/06

# The Simplest Logistic Regression

#import packages
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage

# the definition of the activation function
def sigmoid(z):
    """Compute the sigmoid activation 1 / (1 + e^(-z)).

    Parameters
    ----------
    z : scalar or numpy array of any shape

    Returns
    -------
    s : sigmoid of ``z``, same shape as ``z``
    """
    s = 1 / (1 + np.exp(-z))
    return s

# the initialization
def initialize_with_zeros(dim):
    """Create a zero weight vector and zero bias.

    Parameters
    ----------
    dim : int
        Number of input features (size of the weight vector).

    Returns
    -------
    w : numpy array of shape (dim, 1), all zeros
    b : int
        Bias, initialized to 0.
    """
    w = np.zeros((dim, 1))
    b = 0
    # NOTE: the original sanity asserts on w.shape / type(b) were removed --
    # they are trivially true here and are stripped under ``python -O`` anyway.
    return w, b

# the forward and backward propagation
def propagate(w, b, X, Y):
    """One pass of forward and backward propagation.

    Parameters
    ----------
    w : numpy array of shape (n_x, 1), weights
    b : scalar bias
    X : numpy array of shape (n_x, m), data, one example per column
    Y : numpy array of shape (1, m), labels in {0, 1}

    Returns
    -------
    grads : dict
        ``{"dw": ..., "db": ...}`` -- gradients of the cost w.r.t. w and b.
    cost : float
        Negative log-likelihood cost over the m examples.
    """
    # number of examples; the original used ``X.shape`` (a tuple), which is a bug
    m = X.shape[1]

    # forward: compute the activation and the cross-entropy cost
    A = sigmoid(np.dot(w.T, X) + b)
    cost = -(np.dot(Y, np.log(A).T) + np.dot(1 - Y, np.log(1 - A).T)) / m

    # backward: gradients of the cost
    dw = np.dot(X, (A - Y).T) / m
    db = np.sum(A - Y) / m

    # collapse the (1, 1) cost array to a scalar
    cost = np.squeeze(cost)

    grads = {"dw": dw,
             "db": db}

    return grads, cost

# the optimize function, using gradient descent
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """Optimize w and b by running gradient descent.

    Parameters
    ----------
    w : numpy array of shape (n_x, 1), initial weights
    b : scalar, initial bias
    X : numpy array of shape (n_x, m), data
    Y : numpy array of shape (1, m), labels in {0, 1}
    num_iterations : int, number of gradient-descent steps
    learning_rate : float, step size
    print_cost : bool, if True print the cost every 100 iterations

    Returns
    -------
    params : dict with the learned "w" and "b"
    grads : dict with the final "dw" and "db"
    costs : list of the cost recorded every 100 iterations
    """
    costs = []

    for i in range(num_iterations):

        # compute the grads and cost with the propagation function
        grads, cost = propagate(w, b, X, Y)

        # get the derivatives
        dw = grads["dw"]
        db = grads["db"]

        # update
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # record
        if i % 100 == 0:
            costs.append(cost)

        # print every 100 times
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))

    params = {"w": w,
              "b": b}

    grads = {"dw": dw,
             "db": db}

    return params, grads, costs

# define the predict function
def predict(w, b, X):
    """Predict 0/1 labels using learned logistic-regression parameters.

    Parameters
    ----------
    w : numpy array with n_x elements, weights (reshaped to (n_x, 1))
    b : scalar bias
    X : numpy array of shape (n_x, m), data

    Returns
    -------
    Y_prediction : numpy array of shape (1, m) with entries in {0., 1.}
    """
    # number of examples; the original ``X.shape`` (a tuple) was a bug
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    # original had w.reshape(X.shape, 1) -- invalid; n_x is X.shape[0]
    w = w.reshape(X.shape[0], 1)

    A = sigmoid(np.dot(w.T, X) + b)

    # threshold the probabilities at 0.5 (entries are already 0 by default)
    for i in range(A.shape[1]):
        if A[0, i] >= 0.5:
            Y_prediction[0, i] = 1

    return Y_prediction

# merge everything into one function
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """Build and train the logistic-regression model, then evaluate it.

    Parameters
    ----------
    X_train : numpy array of shape (n_x, m_train)
    Y_train : numpy array of shape (1, m_train), labels in {0, 1}
    X_test : numpy array of shape (n_x, m_test)
    Y_test : numpy array of shape (1, m_test), labels in {0, 1}
    num_iterations : int, gradient-descent steps (default 2000)
    learning_rate : float, step size (default 0.5)
    print_cost : bool, print the cost every 100 iterations

    Returns
    -------
    d : dict with costs, train/test predictions, learned w and b,
        and the hyperparameters used.
    """
    # initialize; the original passed X_train.shape (a tuple) -- n_x is shape[0]
    w, b = initialize_with_zeros(X_train.shape[0])

    # run gradient descent (this call was missing in the original)
    parameters, grads, costs = optimize(w, b, X_train, Y_train,
                                        num_iterations, learning_rate, print_cost)

    # get the learned w and b
    w = parameters["w"]
    b = parameters["b"]

    # predict on the training and test sets
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}

    return d

### References: the deep-learning courses on Coursera

Last modification: March 13th, 2019 at 07:06 pm
If you found this article useful, please feel free to share it.