UT-Austin-RPL/deoxys_control

Yet another implementation of OSC move

Steve-Tod opened this issue · 0 comments

import numpy as np


def move_to_coord(target, gripper, max_step=100, action_scale=0.7,
                  scale_factor_low=0.5, scale_factor_high=2.0, tol=5e-3):
    # Drive the end effector toward `target` (xyz in the robot base frame) with OSC,
    # sending the gripper command derived from `gripper` at every control step.
    distance = 100  # sentinel; overwritten once the first state is read
    prev_pos = np.zeros(3)
    
    # Block until the state buffer is populated and the reported EE pose is non-zero.
    while len(robot_interface._state_buffer) == 0 or np.max(np.abs(robot_interface.last_state.O_T_EE)) < 1e-3:
        pass
    for step in range(max_step):
        if len(robot_interface._state_buffer) > 0:
            cur_joint = robot_interface._state_buffer[-1].q
            # cur_pos = urdf_model.get_link_pose(cur_joint, 3)[0]
            # cur_pos = np.array(cur_pos)
            # Current EE position (base frame) and remaining offset to the target.
            _, cur_pos = robot_interface.last_eef_rot_and_pos
            cur_pos = cur_pos.reshape(3)
            offset = target - cur_pos
            distance = np.linalg.norm(offset)
            # print(offset, distance, tol)
            if distance < tol:
                break
            # How far the EE actually moved since the previous control step.
            delta = np.linalg.norm(cur_pos - prev_pos)
            prev_pos = cur_pos
            # print(distance, delta)
            # If the reported joint state is still all zeros, the state is not valid yet; hold still.
            if np.max(np.abs(np.array(cur_joint))) < 5e-3:
                action = np.zeros(3)
            else:
                # Unit direction toward the target, scaled by action_scale.
                action = np.copy(offset) / np.linalg.norm(offset)
                action *= action_scale
                # Slow down once close to the target.
                if distance < tol * 10:
                    action *= scale_factor_low
                # If the EE has barely moved and we are past 20% of the step budget,
                # ramp the action up toward scale_factor_high to push through the stall.
                if delta < tol and step > 0.2 * max_step:
                    rel_scale = (1.0 * step / max_step - 0.2) * 2
                    rel_scale = (1 - rel_scale) + rel_scale * scale_factor_high
                    action *= rel_scale
                action = np.clip(action, a_min=-1, a_max=1)
        else:
            action = np.zeros(3)
        # Append zero rotation deltas and the gripper command to form the full action.
        gripper_state = -1.0 if gripper else 1.0
        action = np.concatenate((action, np.array([0, 0, 0, gripper_state])))
        # Use the controller's EE state estimator with smoothing coefficient 0.8.
        controller_cfg.state_estimator_cfg.is_estimation = True
        controller_cfg.state_estimator_cfg.alpha_eef = 0.8
        robot_interface.control(
            controller_type=controller_type, action=action, controller_cfg=controller_cfg
        )
    # True if the final EE position is within tol of the target.
    return distance < tol  # , distance, offset  # , cur_joint
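
For reference, here is a minimal sketch of how the free names used above (robot_interface, controller_type, controller_cfg) could be set up, modeled on the repo's example scripts. The interface/controller YAML file names, the OSC_POSITION choice, and the target position are my assumptions, not something stated in this issue:

import numpy as np

from deoxys import config_root
from deoxys.franka_interface import FrankaInterface
from deoxys.utils import YamlConfig

# Assumed configs; substitute whatever interface/controller YAMLs you actually use.
robot_interface = FrankaInterface(config_root + "/charmander.yml")
controller_type = "OSC_POSITION"
controller_cfg = YamlConfig(config_root + "/osc-position-controller.yml").as_easydict()

# Example call with an assumed target position in the base frame; the last action
# dimension is -1.0 when gripper=True and 1.0 otherwise, per the function above.
target = np.array([0.45, 0.0, 0.25])
reached = move_to_coord(target, gripper=False)
print("converged:", reached)

robot_interface.close()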