projects_with_roles = [
  { id: 1, name: 'First', roles: ['user', 'compliance_lead'] },
  { id: 5, name: 'Five', roles: ['financial_lead'] }
]
projects_to_add = [
  { id: 5, name: 'Five', roles: ['technical_lead'] },
  { id: 10, name: 'Ten', roles: ['user'] }
]
(projects_with_roles + projects_to_add).each_with_object({}) do |g,h|
  h.update([g[:id], g[:name]] => g[:roles]) { |_,o,n| o | n }
end.map { |(id,name),roles| { id: id, name: name, roles: roles } }
#=> [{:id=>1, :name=>"First", :roles=>["user", "compliance_lead"]},
# {:id=>5, :name=>"Five", :roles=>["financial_lead", "technical_lead"]},
# {:id=>10, :name=>"Ten", :roles=>["user"]}]
This does not mutate projects_with_roles. If that is desired, set projects_with_roles equal to the above calculation.
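That reassignment is just the same expression assigned back, for example:

projects_with_roles = (projects_with_roles + projects_to_add).
  each_with_object({}) do |g,h|
    h.update([g[:id], g[:name]] => g[:roles]) { |_,o,n| o | n }
  end.map { |(id,name),roles| { id: id, name: name, roles: roles } }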
This uses the form of Hash#update (a.k.a. merge!) that employs the block { |_,o,n| o | n } to determine the values of keys that are present in both hashes being merged. See the doc for an explanation of the values of the block's three variables (_, o and n). (I've represented the first, the common key, with an underscore to signal that it is not used in the block calculation.)
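A minimal standalone illustration of that block form (the values here are arbitrary): the block fires only for :a, the key present in both hashes, and Array#| forms the union of the two values, dropping duplicates; :b is simply added.

h = { a: [1, 2] }
h.update({ a: [2, 3], b: [4] }) { |_, o, n| o | n }
#=> {:a=>[1, 2, 3], :b=>[4]}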
Note that the intermediate calculation is as follows:
(projects_with_roles + projects_to_add).each_with_object({}) do |g,h|
  h.update([g[:id], g[:name]] => g[:roles]) { |_,o,n| o | n }
end
#=> {[1, "First"]=>["user", "compliance_lead"],
# [5, "Five"]=>["financial_lead", "technical_lead"],
# [10, "Ten"]=>["user"]}
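Each key of this intermediate hash is a two-element array [id, name], which is why the final map can decompose each key-value pair with the block variables |(id,name),roles|. A one-pair sketch of that decomposition:

{ [1, "First"] => ["user"] }.map { |(id, name), roles| { id: id, name: name, roles: roles } }
#=> [{:id=>1, :name=>"First", :roles=>["user"]}]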
By building a hash and then converting it to an array of hashes, the computational complexity is kept to nearly O(projects_with_roles.size + projects_to_add.size), as hash key lookups are close to O(1).
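For contrast, here is a sketch of a quadratic alternative, one that performs a linear scan of projects_with_roles for each element of projects_to_add (and that, unlike the calculation above, mutates projects_with_roles):

projects_to_add.each do |g|
  existing = projects_with_roles.find { |p| p[:id] == g[:id] } # O(n) scan per element
  if existing
    existing[:roles] |= g[:roles] # merge roles, dropping duplicates
  else
    projects_with_roles << g
  end
end
# roughly O(projects_with_roles.size * projects_to_add.size) overall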