So I saw this VHDL code for a D flip-flop (DFF) testbench somewhere, and I don't quite get a few things.
1) Why are there 5 cases? Why aren't there just two: one for when the input is 0 and one for when it is 1?
2) What was the logic behind the waiting periods? The delays of 12, 28, 2, 10, and 20 ns look very randomly chosen.
Here is the code:
library ieee;
use ieee.std_logic_1164.all;

-- The testbench has no ports of its own.
entity dff_tb is
end dff_tb;

architecture testbench of dff_tb is
    signal T_din:   std_logic;
    signal T_dclk:  std_logic;
    signal T_qout:  std_logic;
    signal T_nqout: std_logic;

    component dff
        port ( din:   in  std_logic;
               dclk:  in  std_logic;
               qout:  out std_logic;
               nqout: out std_logic
             );
    end component;
begin
    dut_dff: dff port map (T_din, T_dclk, T_qout, T_nqout);

    -- Clock: 10 ns period, so rising edges occur at t = 5, 15, 25, ... ns.
    process
    begin
        T_dclk <= '0';
        wait for 5 ns;
        T_dclk <= '1';
        wait for 5 ns;
    end process;

    -- Stimulus and checks.
    process
        variable err_cnt: integer := 0;  -- declared but never used here
    begin
        -- case 1: din -> '1' at t = 0; checked at t = 12 ns, after the edge at 5 ns
        T_din <= '1';
        wait for 12 ns;
        assert (T_qout = '1') report "Error1!" severity error;
        -- case 2: din -> '0' at t = 12 ns; checked at t = 40 ns, after the edges at 15, 25, 35 ns
        T_din <= '0';
        wait for 28 ns;
        assert (T_qout = '0') report "Error2!" severity error;
        -- case 3: din -> '1' at t = 40 ns; checked at t = 42 ns, before the next edge at 45 ns
        T_din <= '1';
        wait for 2 ns;
        assert (T_qout = '0') report "Error3!" severity error;
        -- case 4: din -> '0' at t = 42 ns; checked at t = 52 ns, after the edge at 45 ns
        T_din <= '0';
        wait for 10 ns;
        assert (T_qout = '0') report "Error4!" severity error;
        -- case 5: din -> '1' at t = 52 ns; checked at t = 72 ns, after the edges at 55, 65 ns
        T_din <= '1';
        wait for 20 ns;
        assert (T_qout = '1') report "Error5!" severity error;
        wait;  -- suspend the process once all cases have run
    end process;
end testbench;
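For context, the dff component itself wasn't part of the snippet I found. Going by the component declaration, I assume it's a plain rising-edge D flip-flop, something like this sketch (the architecture name behavioral and the exact coding style are my guesses):

library ieee;
use ieee.std_logic_1164.all;

entity dff is
    port ( din:   in  std_logic;
           dclk:  in  std_logic;
           qout:  out std_logic;
           nqout: out std_logic
         );
end dff;

architecture behavioral of dff is
begin
    -- Sensitive only to the clock: din is sampled on rising edges,
    -- and the outputs hold their values between edges.
    process (dclk)
    begin
        if rising_edge(dclk) then
            qout  <= din;
            nqout <= not din;
        end if;
    end process;
end behavioral;

With that assumption and the 10 ns clock above, the rising edges land at t = 5, 15, 25, ... ns, but I still don't see the pattern behind the 12, 28, 2, 10, and 20 ns waits.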