
I am using IBM Watson's Natural Language Understanding API. I used the following code from the API's documentation to return a sentiment analysis of some tweets by Nike stored in a DataFrame:

import json
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 \
    import Features, EntitiesOptions, KeywordsOptions

naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
   version='2018-09-21',
   iam_apikey='[KEY HIDDEN]',
   url='https://gateway.watsonplatform.net/natural-language-understanding/api')

for tweet in nikedf["text"]:
    response = naturalLanguageUnderstanding.analyze(
      text=tweet,
      features=Features(
        entities=EntitiesOptions(
          emotion=False,
          sentiment=True,
          limit=2),
        keywords=KeywordsOptions(
          emotion=False,
          sentiment=True,
          limit=2))).get_result()
    print(json.dumps(response, indent=2))

Each iteration prints a JSON dump like the following:

{
  "usage": {
    "text_units": 1,
    "text_characters": 140,
    "features": 2
  },
  "language": "en",
  "keywords": [
    {
      "text": "Kaepernick7 Kapernick",
      "sentiment": {
        "score": 0.951279,
        "label": "positive"
      },
      "relevance": 0.965894,
      "count": 1
    },
    {
      "text": "campaign",
      "sentiment": {
        "score": 0.951279,
        "label": "positive"
      },
      "relevance": 0.555759,
      "count": 1
    }
  ],
  "entities": [
    {
      "type": "Company",
      "text": "nike",
      "sentiment": {
        "score": 0.899838,
        "label": "positive"
      },
      "relevance": 0.92465,
      "disambiguation": {
        "subtype": [],
        "name": "Nike, Inc.",
        "dbpedia_resource": "http://dbpedia.org/resource/Nike,_Inc."
      },
      "count": 2
    },
    {
      "type": "Company",
      "text": "Kapernick",
      "sentiment": {
        "score": 0.899838,
        "label": "positive"
      },
      "relevance": 0.165888,
      "count": 1
    }
  ]
}
{
  "usage": {
    "text_units": 1,
    "text_characters": 140,
    "features": 2
  },
  "language": "en",
  "keywords": [
    {
      "text": "ORIGINS PAY",
      "sentiment": {
        "score": 0.436905,
        "label": "positive"
      },
      "relevance": 0.874857,
      "count": 1
    },
    {
      "text": "RT",
      "sentiment": {
        "score": 0.436905,
        "label": "positive"
      },
      "relevance": 0.644407,
      "count": 1
    }
  ],
  "entities": [
    {
      "type": "Company",
      "text": "Nike",
      "sentiment": {
        "score": 0.0,
        "label": "neutral"
      },
      "relevance": 0.922792,
      "disambiguation": {
        "subtype": [],
        "name": "Nike, Inc.",
        "dbpedia_resource": "http://dbpedia.org/resource/Nike,_Inc."
      },
      "count": 1
    },
    {
      "type": "TwitterHandle",
      "text": "@IcySoleOnline",
      "sentiment": {
        "score": 0.0,
        "label": "neutral"
      },
      "relevance": 0.922792,
      "count": 1
    }
  ]
}
{
  "usage": {
    "text_units": 1,
    "text_characters": 137,
    "features": 2
  },
  "language": "en",
  "keywords": [
    {
      "text": "RT",
      "sentiment": {
        "score": 0.946834,
        "label": "positive"
      },
      "relevance": 0.911909,
      "count": 2
    },
    {
      "text": "SPOTS",
      "sentiment": {
        "score": 0.946834,
        "label": "positive"
      },
      "relevance": 0.533273,
      "count": 1
    }
  ],
  "entities": [
    {
      "type": "TwitterHandle",
      "text": "@dropssupreme",
      "sentiment": {
        "score": 0.0,
        "label": "neutral"
      },
      "relevance": 0.01,
      "count": 1
    }
  ]
}
{
  "usage": {
    "text_units": 1,
    "text_characters": 140,
    "features": 2
  },
  "language": "en",
  "keywords": [
    {
      "text": "Golden Touch' boots",
      "sentiment": {
        "score": 0,
        "label": "neutral"
      },
      "relevance": 0.885418,
      "count": 1
    },
    {
      "text": "RT",
      "sentiment": {
        "score": 0,
        "label": "neutral"
      },
      "relevance": 0.765005,
      "count": 1
    }
  ],
  "entities": [
    {
      "type": "Company",
      "text": "Nike",
      "sentiment": {
        "score": 0.0,
        "label": "neutral"
      },
      "relevance": 0.33,
      "disambiguation": {
        "subtype": [],
        "name": "Nike, Inc.",
        "dbpedia_resource": "http://dbpedia.org/resource/Nike,_Inc."
      },
      "count": 1
    },
    {
      "type": "Person",
      "text": "Luka Modri\u0107",
      "sentiment": {
        "score": 0.0,
        "label": "neutral"
      },
      "relevance": 0.33,
      "disambiguation": {
        "subtype": [
          "Athlete",
          "FootballPlayer"
        ],
        "name": "Luka Modri\u0107",
        "dbpedia_resource": "http://dbpedia.org/resource/Luka_Modri\u0107"
      },
      "count": 1
    }
  ]
}

How do I convert this output into a DataFrame with the columns text, score, and label (taken from the JSON dumps)?

Thank you in advance!!


1 Answer


Your JSON text as-is won't be easy to parse. One option is to collect the responses in a list and use that list to build the DataFrame.

import json
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 \
    import Features, EntitiesOptions, KeywordsOptions

naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
   version='2018-09-21',
   iam_apikey='[KEY HIDDEN]',
   url='https://gateway.watsonplatform.net/natural-language-understanding/api')

responses = []
for tweet in nikedf["text"]:
    response = naturalLanguageUnderstanding.analyze(
      text=tweet,
      features=Features(
        entities=EntitiesOptions(
          emotion=False,
          sentiment=True,
          limit=2),
        keywords=KeywordsOptions(
          emotion=False,
          sentiment=True,
          limit=2))).get_result()
    responses.append(response)

Use the responses list to create an RDD, and parse each response into rows with your desired columns:

from pyspark.sql import Row

# Row fields: text, score, and label
def convert_to_row(response):
    rows = []
    for keyword in response['keywords']:
        row_dict = {}
        row_dict['text'] = keyword['text']
        row_dict['score'] = keyword['sentiment']['score']
        row_dict['label'] = keyword['sentiment']['label']
        row = Row(**row_dict)
        rows.append(row)
    return rows

sc.parallelize(responses) \
    .flatMap(convert_to_row) \
    .toDF().show()
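
Note that sc.parallelize(...).toDF() assumes an existing SparkContext/SparkSession. If you are not running Spark, and since nikedf already looks like a pandas DataFrame, a plain-pandas version of the same flattening is sketched below; it assumes the responses list collected above and, like convert_to_row, only looks at the keywords feature.

import pandas as pd

# Pandas-only alternative: flatten the keyword sentiment from each
# collected response into rows of text, score, and label.
rows = []
for response in responses:
    for keyword in response.get('keywords', []):
        rows.append({
            'text': keyword['text'],
            'score': keyword['sentiment']['score'],
            'label': keyword['sentiment']['label'],
        })

keyword_df = pd.DataFrame(rows, columns=['text', 'score', 'label'])
print(keyword_df)

The same inner loop could also walk response['entities'] if you want the entity sentiments in the same table.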