from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import geopandas as gpd
np.random.seed(42)
plt.rcParams['figure.figsize'] = (10,6)
import altair as alt
from vega_datasets import data as vega_data
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
I've extracted neighborhood Airbnb statistics for Philadelphia neighborhoods from Tom Slee's website.
The data includes average price per person, overall satisfaction, and number of listings.
Original research study: How Airbnb's Data Hid the Facts in New York City
The data is available in CSV format ("philly_airbnb_by_neighborhoods.csv") in the "data/" folder of the repository.
airbnb = pd.read_csv("data/philly_airbnb_by_neighborhoods.csv")
airbnb.head()
We will cluster the neighborhoods based on three columns: `price_per_person`, `overall_satisfaction`, and `N` (the number of listings).
# Initialize the Kmeans object
kmeans = KMeans(n_clusters=5, random_state=42)
# Scale the data features we want
scaler = StandardScaler()
scaled_airbnb_data = scaler.fit_transform(airbnb[['price_per_person', 'overall_satisfaction', 'N']])
# Run the fit!
kmeans.fit(scaled_airbnb_data)
# Save the cluster labels
airbnb['label'] = kmeans.labels_
# New column "label"!
airbnb.head()
To gain some insight into our clusters, after calculating the K-Means labels we can group by the `label` column and calculate the size of each cluster, as well as the `mean()` of each of our features.
airbnb.groupby('label', as_index=False).size()
airbnb.groupby('label', as_index=False).mean().sort_values(by='price_per_person')
airbnb.loc[airbnb['label'] == 2]
airbnb.loc[airbnb['label'] == 3]
airbnb.loc[airbnb['label'] == 4]
Now, plot the clusters on a map of the neighborhoods, using the geometries in "./data/philly_neighborhoods.geojson". The `categorical=True` and `legend=True` keywords will be useful here.
hoods = gpd.read_file("./data/philly_neighborhoods.geojson")
hoods.head()
airbnb.head()
# do the merge
airbnb2 = hoods.merge(airbnb, left_on='Name', right_on='neighborhood', how='left')
# assign -1 to the neighborhoods without any listings
airbnb2['label'] = airbnb2['label'].fillna(-1)
# plot the data
airbnb2 = airbnb2.to_crs(epsg=3857)
# setup the figure
f, ax = plt.subplots(figsize=(10, 8))
# plot, coloring by label column
# specify categorical data and add legend
airbnb2.plot(
column="label",
cmap="Dark2",
categorical=True,
legend=True,
edgecolor="k",
lw=0.5,
ax=ax,
)
ax.set_axis_off()
plt.axis("equal");
Use altair to plot the clustering results with a tooltip for the neighborhood name and cluster label.
Hint: See week 3B's lecture on interactive choropleths with altair.
# plot map, where variables are nested within `properties`,
alt.Chart(airbnb2.to_crs(epsg=4326)).mark_geoshape().properties(
width=500, height=400,
).encode(
tooltip=["Name:N", "label:N"],
color=alt.Color("label:N", scale=alt.Scale(scheme="Dark2")),
)
Group 2!
Use the "Elbow" method)...
# Number of clusters to try out
n_clusters = list(range(2, 10))
# Run kmeans for each value of k
inertias = []
for k in n_clusters:
# Initialize and run
kmeans = KMeans(n_clusters=k)
kmeans.fit(scaled_airbnb_data)
# Save the "inertia"
inertias.append(kmeans.inertia_)
# Plot it!
plt.plot(n_clusters, inertias, marker='o', ms=10, mfc='white', lw=4, mew=3)
The kneed package
There is also a nice package called `kneed` that can determine the "knee" point quantitatively, using the kneedle algorithm.
from kneed import KneeLocator
# Initialize the knee algorithm
kn = KneeLocator(n_clusters, inertias, curve='convex', direction='decreasing')
# Print out the knee
print(kn.knee)
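If you want to use this value directly, here is a minimal sketch of re-fitting K-means with the suggested number of clusters (this assumes a valid knee was found and `scaled_airbnb_data` from above is still defined; the `label_best` column name is purely for illustration):
# Re-run K-means with the number of clusters suggested by the knee point
# NOTE: illustrative sketch; "label_best" is a hypothetical column name
best_k = kn.knee
kmeans_best = KMeans(n_clusters=best_k, random_state=42)
kmeans_best.fit(scaled_airbnb_data)
airbnb['label_best'] = kmeans_best.labels_
airbnb.groupby('label_best').size()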
Now on to the more traditional view of "clustering"...
"Density-Based Spatial Clustering of Applications with Noise"
DBSCAN has two key parameters: `eps` and `min_samples`. For example, with `min_samples` = 4:
- Core points have at least `min_samples` (4) points (including the point itself) within a distance of `eps` from each of these points.
- Border points are within a distance of `eps` from the core points, so they are part of the cluster, but they have fewer than `min_samples` points within a distance of `eps`, so they are not core points.
- Noise points are farther than `eps` from any of the cluster points.

A higher `min_samples` or a lower `eps` requires a higher density to form a cluster.
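To make these definitions concrete, here is a minimal, self-contained sketch (the points are made up purely for illustration) showing how `eps` and `min_samples` split a tiny dataset into core samples and noise:
import numpy as np
from sklearn.cluster import dbscan
# five tightly packed points plus one far-away point (synthetic data)
pts = np.array([[0, 0], [1, 0], [0, 1], [1, 1], [0.5, 0.5], [10, 10]])
# with eps=2 and min_samples=4, the tight group forms a single cluster
# and the far-away point is labeled as noise (-1)
cores, labels = dbscan(pts, eps=2, min_samples=4)
print(cores)   # indices of the core samples: [0 1 2 3 4]
print(labels)  # cluster labels: [0 0 0 0 0 -1]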
First, load the data, which includes `x` and `y` columns in units of meters.
coords = gpd.read_file('./data/osm_gps_philadelphia.geojson')
coords.head()
num_points = len(coords)
print(f"Total number of points = {num_points}")
from sklearn.cluster import dbscan
dbscan?
# some parameters to start with
eps = 50 # in meters
min_samples = 50
cores, labels = dbscan(coords[["x", "y"]], eps=eps, min_samples=min_samples)
The function returns two objects, which we call `cores` and `labels`. `cores` contains the indices of each point that is classified as a core sample.
# The first 5 elements
cores[:5]
The length of cores tells you how many core samples we have:
num_cores = len(cores)
print(f"Number of core samples = {num_cores}")
The `labels` array tells you the cluster number each point belongs to. Points classified as noise receive a cluster label of -1:
# The first 5 elements
labels[:5]
The `labels` array is the same length as our input data, so we can add it as a column in our original data frame.
# Add our labels to the original data
coords['label'] = labels
coords.head()
The number of clusters is the number of unique labels minus one (because noise has a label of -1)
num_clusters = coords['label'].nunique() - 1
print(f"number of clusters = {num_clusters}")
We can group by the `label` column to get the size of each cluster:
cluster_sizes = coords.groupby('label', as_index=False).size()
cluster_sizes
# All points get assigned a cluster label (-1 reserved for noise)
cluster_sizes['size'].sum() == num_points
The number of noise points is the size of the cluster with label "-1":
num_noise = cluster_sizes.iloc[0]['size']
print(f"number of noise points = {num_noise}")
If points aren't noise or core samples, they must be edges:
num_edges = num_points - num_cores - num_noise
print(f"Number of edge points = {num_edges}")
Now, visualize the clusters by plotting the noise samples in grey and the mean `x` and mean `y` value for each cluster in red.
# Setup figure and axis
f, ax = plt.subplots(figsize=(10, 10), facecolor="black")
# Plot the noise samples in grey
noise = coords.loc[coords["label"] == -1]
ax.scatter(noise["x"], noise["y"], c="grey", s=5, linewidth=0)
# Loop over each cluster number
for label_num in range(0, num_clusters):
# Extract the samples with this label number
this_cluster = coords.loc[coords["label"] == label_num]
# Calculate the mean (x,y) point for this cluster in red
x_mean = this_cluster["x"].mean()
y_mean = this_cluster["y"].mean()
# Plot this centroid point in red
ax.scatter(x_mean, y_mean, linewidth=0, color="red")
# Format
ax.set_axis_off()
ax.set_aspect("equal")
DBSCAN can find high-density clusters using more than just spatial coordinates, as long as the features are properly normalized.
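The general recipe is to scale the features so they are comparable, then run DBSCAN on the scaled values. Here is a minimal sketch with synthetic data (the column names and parameter values are made up for illustration; the taxi example below walks through the real workflow):
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import dbscan
# synthetic data: spatial coordinates (meters) plus a non-spatial feature (hour of day)
rng = np.random.default_rng(42)
df = pd.DataFrame({
    "x": rng.normal(0, 500, 1000),
    "y": rng.normal(0, 500, 1000),
    "hour": rng.integers(0, 24, 1000),
})
# scale first so no single feature dominates the distance calculation
X = StandardScaler().fit_transform(df[["x", "y", "hour"]])
# eps is now measured in scaled (standard deviation) units rather than meters
cores, labels = dbscan(X, eps=0.5, min_samples=10)
df["label"] = labels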
I've extracted data for taxi pickups and drop-offs occurring in the Williamsburg neighborhood of NYC from the NYC taxi open data.
The data includes the pickup and drop-off locations, the trip distance, and the pickup hour.
Goal: identify clusters of similar taxi rides that are not only clustered spatially, but also clustered for features like hour of day and trip distance
Inspired by this CARTO blog post
taxi = pd.read_csv("./data/williamsburg_taxi_trips.csv")
taxi.head()
We will focus on the following columns:
- `pickup_x` and `pickup_y`
- `dropoff_x` and `dropoff_y`
- `trip_distance`
- `pickup_hour`

Use the `StandardScaler` to normalize these features.
feature_columns = [
"pickup_x",
"pickup_y",
"dropoff_x",
"dropoff_y",
"trip_distance",
"pickup_hour",
]
features = taxi[feature_columns].copy()
features.head()
# Scale these features
scaler = StandardScaler()
scaled_features = scaler.fit_transform(features)
scaled_features
print(scaled_features.shape)
print(features.shape)
Run DBSCAN and experiment with different values of `eps` and `min_samples`. Try an `eps` of 0.25 and `min_samples` of 50. Add the labels to the original data frame and calculate the number of clusters. It should be less than 50 or so.
Hint: If the algorithm is taking a long time to run (more than a few minutes), the `eps` is probably too big!
# Run DBSCAN
cores, labels = dbscan(scaled_features, eps=0.25, min_samples=50)
# Add the labels back to the original (unscaled) dataset
features['label'] = labels
# Extract the number of clusters
num_clusters = features['label'].nunique() - 1
print(num_clusters)
features
Group by the label, calculate and sort the sizes to find the label numbers of the top 5 largest clusters
N = features.groupby('label').size()
print(N)
# sort from largest to smallest
N = N.sort_values(ascending=False)
N
# extract labels (ignoring label -1 for noise)
top5 = N.iloc[1:6]
top5
top5_labels = top5.index.tolist()
top5_labels
To better identify trends in the top 5 clusters, calculate the mean trip distance and pickup_hour for each of the clusters.
# get the features for the top 5 labels
selection = features['label'].isin(top5_labels)
# select top 5 and groupby by the label
grps = features.loc[selection].groupby('label')
# calculate average pickup hour and trip distance per cluster
avg_values = grps[['pickup_hour', 'trip_distance']].mean()
avg_values
Now visualize the top 5 largest clusters:
Hints:
# a good color scheme for a black background
colors = ['aqua', 'lime', 'red', 'fuchsia', 'yellow']
# EXAMPLE: enumerating a list
for i, label_num in enumerate([0, 1, 2, 3, 4]):
print(f"i = {i}")
print(f"label_num = {label_num}")
# Setup figure and axis
f, ax = plt.subplots(figsize=(10, 10), facecolor="black")
# Plot noise in grey
noise = features.loc[features["label"] == -1]
ax.scatter(noise["pickup_x"], noise["pickup_y"], c="grey", s=5, linewidth=0)
# specify colors for each of the top 5 clusters
colors = ["aqua", "lime", "red", "fuchsia", "yellow"]
# loop over top 5 largest clusters
for i, label_num in enumerate(top5_labels):
print(f"Plotting cluster #{label_num}...")
# select all the samples with label equals "label_num"
this_cluster = features.loc[features["label"] == label_num]
# plot pickups
ax.scatter(
this_cluster["pickup_x"],
this_cluster["pickup_y"],
linewidth=0,
color=colors[i],
s=5,
alpha=0.3,
)
# plot dropoffs
ax.scatter(
this_cluster["dropoff_x"],
this_cluster["dropoff_y"],
linewidth=0,
color=colors[i],
s=5,
alpha=0.3,
)
# Display the figure
ax.set_axis_off()
Another good way to visualize the results is to explore the other clusters one at a time, plotting both the pickups and dropoffs to identify the trends.
Use different colors for pickups/dropoffs to easily identify them.
Make it a function so we can repeat it easily:
def plot_taxi_cluster(label_num):
"""
Plot the pickups and dropoffs for the input cluster label
"""
# Setup figure and axis
f, ax = plt.subplots(figsize=(10, 10), facecolor='black')
# Plot noise in grey
noise = features.loc[features['label']==-1]
ax.scatter(noise['pickup_x'], noise['pickup_y'], c='grey', s=5, linewidth=0)
# get this cluster
this_cluster = features.loc[features['label']==label_num]
# Plot pickups in fuchsia
ax.scatter(this_cluster['pickup_x'], this_cluster['pickup_y'],
linewidth=0, color='fuchsia', s=5, alpha=0.3)
# Plot dropoffs in aqua
ax.scatter(this_cluster['dropoff_x'], this_cluster['dropoff_y'],
linewidth=0, color='aqua', s=5, alpha=0.3)
# Display the figure
ax.set_axis_off()
plt.show()
# plot specific label
plot_taxi_cluster(label_num=6)
Now, use hvplot and datashader to visualize the pickups and dropoffs for the top 5 clusters. Hints:
- Use the `.hvplot.scatter()` function to plot the x/y points
- Specify the `c=` keyword as the column holding the cluster label
- Specify the `color_key` as the `cmap=` keyword
- Use `aggregator=ds.count_cat("label")`: this will color the data points using our categorical color map
- Use the `datashade=True` keyword to tell hvplot to use datashader

# map colors to the top 5 cluster labels
color_key = dict(zip(top5_labels, ['aqua', 'lime', 'red', 'fuchsia', 'yellow']))
print(color_key)
# extract the features for the top 5 clusters
top5_features = features.loc[features['label'].isin(top5_labels)]
import hvplot.pandas
import datashader as ds
import geoviews as gv
# Pickups using a categorical color map
pickups = top5_features.hvplot.scatter(
x="pickup_x",
y="pickup_y",
c="label",
aggregator=ds.count_cat("label"),
datashade=True,
cmap=color_key,
width=800,
height=600,
)
# Dropoffs using a categorical color map
dropoffs = top5_features.hvplot.scatter(
x="dropoff_x",
y="dropoff_y",
c="label",
aggregator=ds.count_cat("label"),
datashade=True,
cmap=color_key,
width=800,
height=600,
)
pickups * dropoffs * gv.tile_sources.CartoDark