
I am using Cassandra as a persistent store along with Hazelcast 3.6 for a REST API (POST). My MapStore implementation is shown below.

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.hazelcast.core.MapStore;

public class InventoryMapStoreImpl implements MapStore<String, Map<String, Integer>> {

    public static Cluster cluster;
    public static Session session;

    public InventoryMapStoreImpl() {
        cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        session = cluster.connect("company");
    }

    @Override
    public synchronized Map<String, Integer>  load(String key) {

        Map<String, Integer> qty = new HashMap<String, Integer>();
        ResultSetFuture futureList = session
                .executeAsync("Select * from company.onhandinventoryavailability WHERE key='" + key + "';");
        try {

            if (futureList.get() != null) {
                ResultSet rsFuture = futureList.get();
                Row resultOne = rsFuture.one();
                if (resultOne != null) {
                    qty = resultOne.getMap(1, String.class, Integer.class);
                }
            }
            session.close();
            cluster.close();
        } catch (InterruptedException e) {
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
        return qty;
    }

    @Override
    public Map<String, Map<String, Integer>> loadAll(Collection<String> keys) {
        return null;
    }

    @Override
    public Iterable<String> loadAllKeys() {
        return null;
    }

    @Override
    public synchronized void store(String key, Map<String, Integer> value) {

        try {

            Insert insert = QueryBuilder.insertInto("onhandinventoryavailability")
                    .value("key", key);
            insert.value("value", value);
            session.execute(insert);
            System.out.println("INSERTED: " + key + " INTO CASSANDRA...");
        } catch (Exception e) {
            System.out.println("ERORRRR");
            e.printStackTrace();
        }
        session.close();
        cluster.close();
    }

    @Override
    public void storeAll(Map<String, Map<String, Integer>> map) {
    }

    @Override
    public void delete(String key) {
    }

    @Override
    public void deleteAll(Collection<String> keys) {
    }
}

And the Hazelcast XML entry for the map is:

<map name="onHandInventoryAvailability">
                <map-store enabled="true">
                        <class-name>com.company.common.hazelcast.mapstore.InventoryMapStoreImpl</class-name>
                        <write-delay-seconds>5</write-delay-seconds>
                        <write-batch-size>1000</write-batch-size>
                        <write-coalescing>true</write-coalescing>
                </map-store>
                <in-memory-format>BINARY</in-memory-format>
                <backup-count>1</backup-count>
                <async-backup-count>0</async-backup-count>
                <time-to-live-seconds>0</time-to-live-seconds>
                <max-idle-seconds>0</max-idle-seconds>
                <eviction-policy>NONE</eviction-policy>
                <max-size policy="PER_NODE">0</max-size>
                <eviction-percentage>25</eviction-percentage>
                <min-eviction-check-millis>100</min-eviction-check-millis>
                <merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy>
                <cache-deserialized-values>INDEX-ONLY</cache-deserialized-values>
         </map>

The problem is that when I hit the POST API after a delay, or one request at a time, the data ends up in both Hazelcast and Cassandra. But when I send more POSTs, say 10 or 100, Hazelcast loses the connection to Cassandra and the write-behind doesn't work properly: only one or two records out of the 10 or 100 make it to Cassandra, not all of them. Is there anything wrong in my MapStore implementation?

1 Answer

Hazelcast instantiates your MapStore implementation just once and invokes load, store, etc. on that single instance. This means you connect to the Cassandra cluster and open a session only once (in your MapStore's constructor), and the first time load or store is invoked you close that session and cluster, so every subsequent call fails. This explains the behavior you are seeing.

The Cassandra driver's Cluster and Session objects are supposed to span the entire lifetime of your application [1]. As already suggested in the comments, you should not close them until your application shuts down.

[1] http://docs.datastax.com/en/developer/java-driver/2.0/java-driver/fourSimpleRules.html
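
For reference, here is a minimal sketch of how the MapStore could be restructured so the Cluster and Session live for the whole application, using Hazelcast's MapLoaderLifecycleSupport callbacks. The table, keyspace, and column names mirror the ones in the question; treat the method bodies as illustrative rather than a drop-in replacement.

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.MapLoaderLifecycleSupport;
import com.hazelcast.core.MapStore;

public class InventoryMapStoreImpl
        implements MapStore<String, Map<String, Integer>>, MapLoaderLifecycleSupport {

    private Cluster cluster;
    private Session session;

    @Override
    public void init(HazelcastInstance hazelcastInstance, Properties properties, String mapName) {
        // Open the Cluster and Session once, when Hazelcast wires up the MapStore.
        cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        session = cluster.connect("company");
    }

    @Override
    public void destroy() {
        // Close them only when the map / Hazelcast instance is shut down.
        session.close();
        cluster.close();
    }

    @Override
    public Map<String, Integer> load(String key) {
        // Reuse the long-lived session; a bind parameter avoids concatenating the key into CQL.
        Row row = session.execute(
                "SELECT value FROM company.onhandinventoryavailability WHERE key = ?", key).one();
        return row != null
                ? row.getMap("value", String.class, Integer.class)
                : new HashMap<String, Integer>();
    }

    @Override
    public void store(String key, Map<String, Integer> value) {
        Insert insert = QueryBuilder.insertInto("onhandinventoryavailability")
                .value("key", key)
                .value("value", value);
        session.execute(insert);
    }

    @Override
    public Map<String, Map<String, Integer>> loadAll(Collection<String> keys) {
        return null;
    }

    @Override
    public Iterable<String> loadAllKeys() {
        return null;
    }

    @Override
    public void storeAll(Map<String, Map<String, Integer>> map) {
        // Naive per-entry writes; batching could be layered on top if needed.
        for (Map.Entry<String, Map<String, Integer>> entry : map.entrySet()) {
            store(entry.getKey(), entry.getValue());
        }
    }

    @Override
    public void delete(String key) {
    }

    @Override
    public void deleteAll(Collection<String> keys) {
    }
}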