0

I'm supposed to write up a 16-bit ALU. My professor wants us to try and code the adder and sub of the ALU with a signal tmp : std_logic_vector(16 downto 0); and then in the case for the select input s we put: tmp <= conv_std_logic_vector(conv_integer(a) + conv_integer(b), 17);

After experimenting with it for a while, my waveform only showed the inputs' values as UUUUUUUUUUUUUUUU. Even after I had commented out the conv_std_logic_vector(...) stuff.

Is there a simple explanation as to why my inputs aren't showing up in the waveform?

Here is my code:

-- 16-Bit ALU
-- By: Logan Jordon
library ieee;
use ieee.std_logic_1164.all;
use ieee.std_logic_unsigned.all;
use IEEE.NUMERIC_STD.ALL;
--use ieee.std_logic_arith.all;
 
entity alu16 is
    port (
        a : in std_logic_vector(15 downto 0);
        b : in std_logic_vector(15 downto 0);
        s : in std_logic_vector(1 downto 0);
        r    : out std_logic_vector(15 downto 0);
        cout : out std_logic;
        lt, eq, gt : out std_logic;
        overflow     : out std_logic
        );
end entity alu16;

architecture beh of alu16 is
signal tmp : std_logic_vector(16 downto 0);
signal add_overflow : std_logic;
signal sub_overflow : std_logic;
begin
    -- PROCESS
process(a, b, add_overflow, sub_overflow)
begin
    case s is
        --ADD
        when "00" =>
            --tmp <= conv_std_logic_vector(conv_integer(a) + conv_integer(b), 17);
            tmp <= a + b;
            overflow <= add_overflow;
        --SUB
        when "01" =>
            --tmp <= conv_std_logic_vector(conv_integer(a) - conv_integer(b), 17);
            tmp <= a - b;
            overflow <= sub_overflow;
        --AND
        when "10" =>
            tmp <= '0' & a AND b;
            overflow <= '0';
        --OR
        when "11" =>
            tmp <= '0' & a OR b;
            overflow <= '0';
        when others =>
            tmp <= "00000000000000000";
    end case;
--One-Bitters
if a > b then
    gt <= '1';
    lt <= '0';
    eq <= '0';
elsif a < b then
    lt <= '1';
    gt <= '0';
    eq <= '0';
elsif a = b then
    eq <= '1';
    lt <= '0';
    gt <= '0';
end if;
end process;

--OUTPUTS
cout <= tmp(16);
r <= tmp(15 downto 0);
add_overflow <= '1' when (a(15) = b(15)) and (a(15) /= tmp(15))
    else '0';
sub_overflow <= '1' when (a(15) = NOT b(15)) and (a(15) /= tmp(15))
    else '0';

end beh;

EDIT: In the case that it might be my test bench, here's the code for my testbench:

library ieee;
use ieee.std_logic_1164.all;
use ieee.std_logic_unsigned.all;
use IEEE.NUMERIC_STD.ALL;

entity alu16_tb is
end alu16_tb;

architecture behavior of alu16_tb is

component ALU16
port(
        a               : in std_logic_vector(15 downto 0);
        b               : in std_logic_vector(15 downto 0);
        s               : in std_logic_vector(1 downto 0);
        r               : out std_logic_vector(15 downto 0);
        cout            : out std_logic;
        lt, eq, gt      : out std_logic;
        overflow        : out std_logic
        );
end component;

-- Signals to interface with the UUT
--  Set each of the input vectors to unique values to avoid
--  needing a process to drive them below
signal a    : std_logic_vector(15 downto 0) := "0000000000000000";
signal b    : std_logic_vector(15 downto 0) := "0000000000000000";
signal s    : std_logic_vector(1 downto 0) := "00";
signal r    : std_logic_vector(15 downto 0):= "0000000000000000";
signal cout : std_logic := '0';
signal lt   : std_logic := '0';
signal gt   : std_logic := '0';
signal eq   : std_logic := '0';
signal overflow     : std_logic := '0';
constant tick : time := 10 ns;

begin

 -- Instantiate the Unit Under Test (UUT)
 uut : ALU16 port map (
      a => a,
      b => b,
      s => s,
      r => r,
      cout => cout, 
      lt => lt,
      gt => gt,
      eq => eq,
      overflow => overflow
         );

   -- Drive selector bits
 drive_s : process
 begin
     a <= "0000000000000001";
     b <= "0000000000000010";

     wait for (tick*2);
     s <= "00";

     wait for (tick*2);
     s <= "01";

     wait for (tick*2);
     s <= "10";

     wait for (tick*2);
     s <= "11";
 end process drive_s;
end;
  • You don't appear to have bounds checking turned on during simulation and there are five (or more) errors in your code. Missing sensitivity item s for your PROCESS, four length mismatches ("-", "+", "and" and "or"). Fix those and there's [no sign of](https://i.stack.imgur.com/gjuql.png) 'U's. Show us the waveform and matching code. –  Feb 10 '17 at 23:45
  • Preferably don't use `std_logic_unsigned` (and never use `std_logic_arith`). `numeric_std` already contains the correct arithmetic. You should specify `signed` and `unsigned` data types for arithmetic, or you could even use integer arithmetic and convert. Did your professor seriously tell you to use `tmp <= conv_std_logic_vector(conv_integer(a) + conv_integer(b), 17);`??? That is very-very old and deprecated way of working... `numeric_std` has been the official IEEE standard for many years [link]http://vhdlguru.blogspot.nl/2010/03/why-library-numericstd-is-preferred.html – JHBonarius Feb 21 '17 at 14:17

0 Answers0