import ast

import numpy as np
def linear_regression_gradient_descent(X, y, alpha, iterations):
    """Fit y ≈ w*x + b by batch gradient descent on the mean squared error.

    Only column 1 of X (the feature values) is read; column 0 is expected
    to be the all-ones intercept column and is ignored here, since the
    intercept is tracked separately as b. Returns the parameters rounded
    to 4 decimal places as [b, w].
    """
    # Initialize the parameters at zero
    w = 0.0
    b = 0.0
    m = len(X)
    for it in range(iterations):
        # Reset the gradient accumulators for this pass over the data
        dw = 0.0
        db = 0.0
        for i in range(m):
            x_i = X[i][1]  # feature value; column 0 (the intercept) is skipped
            # Prediction with the current parameters
            y_hat = w * x_i + b
            # Error (residual) for this sample
            e = y_hat - y[i]
            # Accumulate the partial derivatives of the loss w.r.t. w and b
            dw += e * x_i
            db += e
        # Average the accumulated gradients over all m samples
        dw /= m
        db /= m

        # Update w and b with one gradient step
        w -= alpha * dw
        b -= alpha * db
    # Collect the parameters as [b, w] once training has finished
    theta = np.array([b, w])
    return np.round(theta, 4)
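

# The per-sample loop above can also be written in vectorized NumPy form.
# A minimal sketch (an alternative, not the original solution), assuming X is
# the full (m, n) design matrix whose first column is the all-ones intercept,
# so it generalizes to any number of features:
def linear_regression_gradient_descent_vectorized(X, y, alpha, iterations):
    m, n = X.shape
    theta = np.zeros(n)                   # one parameter per column of X
    for _ in range(iterations):
        errors = X @ theta - y            # residuals for all m samples at once
        gradient = X.T @ errors / m       # same averaged gradient as the loop version
        theta -= alpha * gradient         # simultaneous update of all parameters
    return np.round(theta, 4)
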
# Main program
if __name__ == "__main__":
    # Read the design matrix, target vector, learning rate, and iteration count
    matrix_inputx = input()
    array_y = input()
    alpha = input()
    iterations = input()
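
    # Example of the assumed input format (Python literals, one per line;
    # these particular values are hypothetical, shown only for illustration):
    #   [[1, 1], [1, 2], [1, 3]]
    #   [1, 2, 3]
    #   0.1
    #   1000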

    # Parse the inputs; ast.literal_eval safely evaluates Python literals
    matrix = np.array(ast.literal_eval(matrix_inputx))
    y = np.array(ast.literal_eval(array_y))  # kept 1-D so per-sample errors stay scalars
    alpha = float(alpha)
    iterations = int(iterations)

    # Run gradient descent and print the rounded parameters [b, w]
    output = linear_regression_gradient_descent(matrix, y, alpha, iterations)
    print(output)
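
# Worked example (one gradient step): with inputs [[1, 1], [1, 2], [1, 3]],
# [1, 2, 3], alpha = 0.1, iterations = 1, the initial predictions are all 0,
# so the per-sample errors are -1, -2, -3. Averaging gives
#   dw = (-1*1 + -2*2 + -3*3) / 3 = -14/3,  db = (-1 - 2 - 3) / 3 = -2,
# and one update yields w = 0.1 * 14/3 ≈ 0.4667 and b = 0.1 * 2 = 0.2,
# so the script prints theta ≈ [0.2, 0.4667].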