import sys

def _read_samples(data):
    """Parse the token stream: m, n, alpha, then m rows of "x1 x2 x3 y".

    Returns (n, alpha, X1, X2, X3, Y) where each feature/target list
    holds m integers.
    """
    m = int(data[0])          # number of samples
    n = int(data[1])          # gradient-descent iterations
    alpha = float(data[2])    # learning rate
    X1, X2, X3, Y = [], [], [], []
    idx = 3
    for _ in range(m):
        X1.append(int(data[idx]))
        X2.append(int(data[idx + 1]))
        X3.append(int(data[idx + 2]))
        Y.append(int(data[idx + 3]))
        idx += 4
    return n, alpha, X1, X2, X3, Y


def _min_max_scale(xs):
    """Min-max normalize xs into [0, 1].

    Returns (scaled, lo, rng) with scaled[i] = (xs[i] - lo) / rng.
    A constant feature (rng == 0) maps to all zeros so the division
    cannot blow up.
    """
    lo, hi = min(xs), max(xs)
    rng = hi - lo
    if rng == 0:
        return [0.0] * len(xs), lo, rng
    return [(x - lo) / rng for x in xs], lo, rng


def _batch_gradient_descent(X1, X2, X3, Y, n, alpha):
    """Fit y ~ w0 + w1*x1 + w2*x2 + w3*x3 by full-batch gradient descent.

    Runs n iterations at learning rate alpha, averaging the gradient over
    all samples each step, starting from all-zero weights.
    Returns (w0, w1, w2, w3); w0 is the bias term.
    """
    m = len(Y)
    w0 = w1 = w2 = w3 = 0.0
    for _ in range(n):
        # Accumulate the gradient over the whole batch.
        g0 = g1 = g2 = g3 = 0.0
        for i in range(m):
            # error = prediction - target; the bias's "feature" is constant 1.
            err = w0 + w1 * X1[i] + w2 * X2[i] + w3 * X3[i] - Y[i]
            g0 += err
            g1 += err * X1[i]
            g2 += err * X2[i]
            g3 += err * X3[i]
        # Step against the averaged gradient: w -= alpha * (sum / m).
        # (Kept as alpha * (g / m) to match the original rounding exactly.)
        w0 -= alpha * (g0 / m)
        w1 -= alpha * (g1 / m)
        w2 -= alpha * (g2 / m)
        w3 -= alpha * (g3 / m)
    return w0, w1, w2, w3


def solve():
    """Read a regression problem from stdin and print the fitted weights.

    Input format (whitespace separated):
        m       number of samples
        n       gradient-descent iterations
        alpha   learning rate
        m rows of "x1 x2 x3 y" (integers)

    Trains on min-max-normalized features, converts the learned weights
    back to the raw feature scale, and prints "b w1 w2 w3" rounded to
    two decimals.  Prints nothing when stdin is empty.
    """
    data = sys.stdin.read().strip().split()
    if not data:
        return  # no input at all: nothing to do

    n, alpha, X1, X2, X3, Y = _read_samples(data)

    # Normalize each feature to [0, 1] so one large-scale feature does not
    # dominate the gradient steps.
    N1, min1, rng1 = _min_max_scale(X1)
    N2, min2, rng2 = _min_max_scale(X2)
    N3, min3, rng3 = _min_max_scale(X3)

    w0, w1, w2, w3 = _batch_gradient_descent(N1, N2, N3, Y, n, alpha)

    # Undo the normalization: for x_norm = (x - lo) / rng,
    #   w_real = w_norm / rng    and    b_real = b_norm - sum(w_real * lo).
    # A constant feature keeps weight 0 (its rng is 0).
    w1_real = w1 / rng1 if rng1 != 0 else 0.0
    w2_real = w2 / rng2 if rng2 != 0 else 0.0
    w3_real = w3 / rng3 if rng3 != 0 else 0.0
    w0_real = w0 - (w1_real * min1 + w2_real * min2 + w3_real * min3)

    print(f"{w0_real:.2f} {w1_real:.2f} {w2_real:.2f} {w3_real:.2f}")

# Script entry point: run the solver only when executed directly, not on import.
if __name__ == "__main__":
    solve()