
I would like to know if there is any way to optimize the distance calculation process below. I have included a small example, but I am working with a spreadsheet of more than 6,000 rows, and computing the variable d takes considerable time. Is it possible to adjust this so that it produces the same results, but in an optimized way?

library(rdist)
library(tictoc)
library(geosphere)

time<-tic()

df <- structure(list(Industries = c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19),
                     Latitude = c(-23.8, -23.8, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9),
                     Longitude = c(-49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.7, -49.7, -49.7, -49.7, -49.7, -49.6, -49.6, -49.6, -49.6)),
                class = "data.frame", row.names = c(NA, -19L))

k <- 3  # number of clusters
coordinates <- df[c("Latitude", "Longitude")]
d <- as.dist(distm(coordinates[, 2:1]))       # full pairwise great-circle distance matrix (metres)
fit.average <- hclust(d, method = "average")  # hierarchical clustering, average linkage
clusters <- cutree(fit.average, k)            # cut the tree into k groups
nclusters <- matrix(table(clusters))          # cluster sizes
df$cluster <- clusters

time<-toc()

1.54 sec elapsed

d
          1        2        3        4        5        6        7        8
2      0.00                                                               
3  11075.61 11075.61                                                      
4  11075.61 11075.61     0.00                                             
5  11075.61 11075.61     0.00     0.00                                    
6  11075.61 11075.61     0.00     0.00     0.00                           
7  11075.61 11075.61     0.00     0.00     0.00     0.00                  
8  11075.61 11075.61     0.00     0.00     0.00     0.00     0.00         
9  11075.61 11075.61     0.00     0.00     0.00     0.00     0.00     0.00
10 11075.61 11075.61     0.00     0.00     0.00     0.00     0.00     0.00
11 15048.01 15048.01 10183.02 10183.02 10183.02 10183.02 10183.02 10183.02
12 15048.01 15048.01 10183.02 10183.02 10183.02 10183.02 10183.02 10183.02
13 15048.01 15048.01 10183.02 10183.02 10183.02 10183.02 10183.02 10183.02
14 15048.01 15048.01 10183.02 10183.02 10183.02 10183.02 10183.02 10183.02
15 15048.01 15048.01 10183.02 10183.02 10183.02 10183.02 10183.02 10183.02
16 11075.61 11075.61     0.00     0.00     0.00     0.00     0.00     0.00
17 11075.61 11075.61     0.00     0.00     0.00     0.00     0.00     0.00
18 11075.61 11075.61     0.00     0.00     0.00     0.00     0.00     0.00
19 11075.61 11075.61     0.00     0.00     0.00     0.00     0.00     0.00
          9       10       11       12       13       14       15       16
2                                                                         
3                                                                         
4                                                                         
5                                                                         
6                                                                         
7                                                                         
8                                                                         
9                                                                         
10     0.00                                                               
11 10183.02 10183.02                                                      
12 10183.02 10183.02     0.00                                             
13 10183.02 10183.02     0.00     0.00                                    
14 10183.02 10183.02     0.00     0.00     0.00                           
15 10183.02 10183.02     0.00     0.00     0.00     0.00                  
16     0.00     0.00 10183.02 10183.02 10183.02 10183.02 10183.02         
17     0.00     0.00 10183.02 10183.02 10183.02 10183.02 10183.02     0.00
18     0.00     0.00 10183.02 10183.02 10183.02 10183.02 10183.02     0.00
19     0.00     0.00 10183.02 10183.02 10183.02 10183.02 10183.02     0.00
         17       18
2                   
3                   
4                   
5                   
6                   
7                   
8                   
9                   
10                  
11                  
12                  
13                  
14                  
15                  
16                  
17                  
18     0.00         
19     0.00     0.00

Comparison of the two results (my original code vs. the accepted answer):

> df$cluster <- clusters 
> df
   Industries Latitude Longitude cluster
1           1    -23.8     -49.6       1
2           2    -23.8     -49.6       1
3           3    -23.9     -49.6       2
4           4    -23.9     -49.6       2
5           5    -23.9     -49.6       2
6           6    -23.9     -49.6       2
7           7    -23.9     -49.6       2
8           8    -23.9     -49.6       2
9           9    -23.9     -49.6       2
10         10    -23.9     -49.6       2
11         11    -23.9     -49.7       3
12         12    -23.9     -49.7       3
13         13    -23.9     -49.7       3
14         14    -23.9     -49.7       3
15         15    -23.9     -49.7       3
16         16    -23.9     -49.6       2
17         17    -23.9     -49.6       2
18         18    -23.9     -49.6       2
19         19    -23.9     -49.6       2

> clustered_df
   Industries Latitude Longitude cluster     Dist Cluster
1          11    -23.9     -49.7       3     0.00       1
2          12    -23.9     -49.7       3     0.00       1
3          13    -23.9     -49.7       3     0.00       1
4          14    -23.9     -49.7       3     0.00       1
5          15    -23.9     -49.7       3     0.00       1
6           3    -23.9     -49.6       2 10183.02       2
7           4    -23.9     -49.6       2     0.00       2
8           5    -23.9     -49.6       2     0.00       2
9           6    -23.9     -49.6       2     0.00       2
10          7    -23.9     -49.6       2     0.00       2
11          8    -23.9     -49.6       2     0.00       2
12          9    -23.9     -49.6       2     0.00       2
13         10    -23.9     -49.6       2     0.00       2
14         16    -23.9     -49.6       2     0.00       2
15         17    -23.9     -49.6       2     0.00       2
16         18    -23.9     -49.6       2     0.00       2
17         19    -23.9     -49.6       2     0.00       2
18          1    -23.8     -49.6       1 11075.61       3
19          2    -23.8     -49.6       1     0.00       3
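
To check whether the two approaches group the same industries together despite the arbitrary cluster labels, one option (a sketch; the temporary object m is just an illustrative name) is to align the rows by Industries and cross-tabulate the memberships. If every row and every column of the resulting table has a single non-zero cell, the two partitions agree up to relabelling.

m <- merge(df[, c("Industries", "cluster")],
           clustered_df[, c("Industries", "Cluster")],
           by = "Industries")
table(original = m$cluster, answer = m$Cluster)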
Antonio
    Your code doesn't make much sense. From previous questions I know that you are trying to map out drive times between certain points. Here you are using Great Circle distances. Perhaps using OSRM to measure the drive time/drive distance between points is more suitable? – hello_friend May 28 '20 at 03:09
  • Thanks, friend, for the answer. I tried to leave a very brief example; I don't know if it was understandable. I am using hierarchical clustering, where it is necessary to calculate the distance between all points before generating the clusters. I would like to know whether it is possible to calculate the distance in a faster way. I heard that the haversine function of the spatialrisk package (which is implemented in Rcpp) tends to be faster, but when I tried it I couldn't get it to work (a plain-R sketch of that idea appears right after these comments). I have not tried the osrm package for this purpose in the example above, but I can try it. – Antonio May 28 '20 at 13:55
  • May I recommend reading the documentation for OSRM? https://cran.r-project.org/web/packages/osrm/readme/README.html – hello_friend May 28 '20 at 14:43
  • Please see my solution below, and upvote and accept the answer if it does what you are after! – hello_friend May 29 '20 at 04:21
  • Thanks for the reply. I am without my computer at the moment, but as soon as I test it, I will let you know. – Antonio May 29 '20 at 04:40
  • Hello friend, I accepted your answer. I just didn't understand a few things. I didn't quite understand the calculation of the variable "d": from the result I got, I have only one column with distances. Shouldn't it be the distance between all points? The other thing is that I need to use the hclust function, as it is the function that performs hierarchical clustering. – Antonio May 29 '20 at 14:11
  • Because the lon/lats are ordered, we get the distances between each point and the next one only, and then we determine whether that distance between points is greater than the (1/k * 100) percentile of the values. My question to you is: why do you need to compute distances/hclust between all values if they are sorted? In this case, if the distance between each consecutive value is calculated, does that not achieve the same result? From your code above, after calculating matrices for both steps you then only reuse the cluster vector. – hello_friend May 29 '20 at 23:23
  • Thanks for the answer and explanation. I only question this because there are industries that did not end up in the same cluster. Please look at the code I inserted above comparing the two, both the first way I did it and the way you did it: in the first case industries 1 and 2 are in cluster 1, in the second they are in cluster 3. Can you adjust this? And another question, just out of curiosity: is it possible to pass the variable "d" that you computed to the hclust function? – Antonio May 30 '20 at 00:32
  • Why does the cluster number matter? Are the industries correctly clustered together on the full dataset? If you have a further question, please post a link to it and I will answer it there! – hello_friend May 30 '20 at 04:37
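
On the haversine idea from the comments: below is a minimal sketch of a vectorized haversine in base R (the helper name haversine_matrix is purely illustrative, not a function from spatialrisk or geosphere, and the Earth radius of 6378137 m is assumed to match geosphere's default). It builds the full pairwise matrix in one vectorized pass, so the rest of the original pipeline (as.dist(), hclust(), cutree()) stays unchanged; since distm() also defaults to the haversine formula, the values should agree, but verify on a small sample before relying on it.

# Vectorized haversine distance matrix in metres (illustrative helper, not from a package)
haversine_matrix <- function(lat, lon, r = 6378137) {
  lat <- lat * pi / 180
  lon <- lon * pi / 180
  dlat <- outer(lat, lat, "-")
  dlon <- outer(lon, lon, "-")
  a <- sin(dlat / 2)^2 + outer(cos(lat), cos(lat)) * sin(dlon / 2)^2
  2 * r * asin(pmin(1, sqrt(a)))  # pmin() guards against rounding slightly above 1
}

d_fast <- as.dist(haversine_matrix(df$Latitude, df$Longitude))
fit.average <- hclust(d_fast, method = "average")
clusters <- cutree(fit.average, k = 3)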

1 Answer


@Jose Perhaps not as mathematically sound (in terms of the clustering), but (generally) a better measure of great-circle distances (Vincenty's formulae), and roughly 8 times faster to achieve what I think is your desired result (using just your sample data).

# Order the dataframe by Lon and Lat: ordered_df => data.frame
ordered_df <- 
  df %>% 
  arrange(., Longitude, Latitude)  

# Number of clusters we are expecting: k => numeric scalar
k <- 3

# Matrix of co-ordinates: coordinates => matrix
coordinates <-   
  ordered_df %>% 
  select(Longitude, Latitude) %>% 
  as.matrix()

# Sequential great-circle (Vincenty ellipsoid) distances between consecutive points
# in the ordered Long-Lat matrix, with 0 for the first point: d => data.frame
d <- data.frame(Dist = c(0, distVincentyEllipsoid(coordinates)))

# Segment the distances into groups: a new cluster starts whenever the gap to the
# previous point exceeds the (1/k)-quantile of all gaps: Cluster => factor
d$Cluster <- factor(cumsum(d$Dist > (quantile(d$Dist, 1/k))) + 1)

# Merge with base data: clustered_df => data.frame
clustered_df <- cbind(ordered_df, d)

Libraries and sample data:

library(geosphere)
library(dplyr)

df <- structure(list(Industries=c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19), 
Latitude = c(-23.8, -23.8, -23.9, -23.9, -23.9,  -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9, -23.9),
Longitude = c(-49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.6, -49.7,-49.7, -49.7, -49.7, -49.7, -49.6, -49.6, -49.6, -49.6)),
class = "data.frame", row.names = c(NA, -19L))
start_time <- Sys.time()
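
The main saving in this approach is that, once the rows are ordered, only n - 1 consecutive distances are computed instead of the n*(n-1)/2 pairwise distances that the distm()/hclust() route needs, which is what makes it workable for the 6,000+ rows mentioned in the question. A rough illustration of the scale difference (the 6000 is just the row count from the question):

n <- 6000
n * (n - 1) / 2   # ~18 million pairwise distances for the distm()/hclust() route
n - 1             # 5999 consecutive distances for the ordered distVincentyEllipsoid() route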
hello_friend