I used Peyman Naseri's answer as the basis and want to share my implementation:
import numpy as np
import time

def minor(arr, i, j):
    # Drop row i and column j to get the (n-1) x (n-1) minor.
    return np.delete(np.delete(arr, i, axis=0), j, axis=1)

def det(arr):
    # Recursive Laplace (cofactor) expansion along the first row.
    n = len(arr)
    if n == 1:
        return arr[0][0]
    if n == 2:
        return arr[0][0] * arr[1][1] - arr[0][1] * arr[1][0]
    total = 0
    for i in range(n):
        m = minor(arr, 0, i)
        total += ((-1) ** i) * arr[0][i] * det(m)
    return total

matrix = np.random.randint(-5, 5, size=(10, 10))  # n x n matrix with integer values in the interval [-5, 5)
print(matrix)

start_time = time.time()
print("started:", start_time)
det(matrix)  # result discarded here; only the running time is measured
end_time = time.time()
print("finished:", end_time)
print("--- %s seconds ---" % (end_time - start_time))
As a result, the determinant calculation for a 10x10 matrix takes about one minute on my laptop. I understand that my code is not optimal, but the main point of this implementation (maybe I missed something) was just to get working code based on Peyman Naseri's solution, which looks very elegant to me.
[[ 2 -1 -1 0 -2 0 4 4 3 4]
[-3 1 -1 3 0 -3 -2 0 3 -1]
[ 2 -1 -4 3 0 -2 -2 -5 3 -5]
[-2 -1 2 -2 4 -3 -5 -1 -5 3]
[ 1 -4 1 -5 -5 4 -3 -5 3 1]
[ 2 4 0 -1 -1 -5 -2 -2 -3 -5]
[ 1 4 -3 -4 -5 0 0 0 -5 -1]
[ 0 -5 -5 4 -3 -2 2 -4 2 -5]
[-3 1 -1 -4 4 -5 3 -3 -4 0]
[ 0 -2 2 -3 1 3 2 0 -1 4]]
started: 1636896388.0213237
finished: 1636896442.846928
--- 54.82560420036316 seconds ---
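
For anyone curious about the speed difference: the recursive cofactor expansion above costs O(n!), which is why a 10x10 matrix already takes about a minute. Below is a minimal comparison sketch (not part of the original code) that times NumPy's built-in np.linalg.det, which uses an LU factorization and runs in roughly O(n^3), so the same 10x10 case should finish in a small fraction of a second.

import numpy as np
import time

# Time NumPy's built-in determinant on a matrix of the same shape,
# for comparison with the O(n!) cofactor expansion above.
matrix = np.random.randint(-5, 5, size=(10, 10))

start_time = time.time()
result = np.linalg.det(matrix)  # returns a float, even for an integer matrix
end_time = time.time()

print(round(result))  # round back to the exact integer determinant
print("--- %s seconds ---" % (end_time - start_time))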