# Modeling the Joint Distribution of Wind Speed and Direction using Gaussian Mixture Models¶

OEN Method: Harris & Cook, "The parent wind speed distribution: Why Weibull?" http://www.sciencedirect.com/science/article/pii/S0167610514001056

Gaussian Mixture Models, http://scikit-learn.org/stable/modules/mixture.html

# 1. Set up¶

## 1.1 Environment¶

In [1]:
%matplotlib inline

from import_file import *
from helpers.parallel_helper import *

plt.rcParams['axes.autolimit_mode'] = 'round_numbers'
plt.rcParams['axes.xmargin'] = 0.
plt.rcParams['axes.ymargin'] = 0.
mpl.rcParams['patch.force_edgecolor'] = True


In [2]:
# file_path, bandwidth= './data/NCDC/europe/uk/marham/dat.txt', 1.7
# file_path, bandwidth= './data/NCDC/europe/uk/tiree/dat.txt', 1.9
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/NCDC/europe/uk/boscombe_down/dat.txt', 1.5, 4
# file_path, bandwidth= './data/NCDC/europe/uk/middle_wallop/dat.txt', 1.3
# file_path, bandwidth= './data/NCDC/europe/uk/bournemouth/dat.txt',1.3 # 4?
# file_path= "./data/NCDC/europe/uk/weybourne/dat.txt"
# file_path= "./data/NCDC/europe/uk/skye_lusa/dat.txt" #
# file_path= "./data/NCDC/europe/uk/wattisham/dat.txt"
# file_path= "./data/NCDC/europe/uk/south_uist_range/dat.txt" # inpropoer direction R square measure
# file_path= "./data/NCDC/europe/uk/holbeach/dat.txt" # inpropoer direction R square measure
# file_path= "./data/NCDC/europe/uk/cambridge/dat.txt" # inpropoer direction R square measure
# file_path= "./data/NCDC/europe/us/baltimore/dat.txt" # time too short
# file_path= "./data/NCDC/europe/uk/bealach_na_ba/dat.txt" # time too short
# file_path= "./data/NCDC/europe/uk/benbecula/dat.txt" # truncate (untruncate in m/s), 4?
# file_path= './data/NCDC/europe/uk/southhamption/dat.txt' # high 0, trend

# Active dataset selection: Landsberg-Lech (Germany), KDE bandwidth 0.9,
# 4-component Gaussian mixture. The commented lines around this cell are
# alternative datasets kept for reference, with per-dataset tuning notes.
file_path, bandwidth, NUMBER_OF_GAUSSIAN = "./data/NCDC/europe/germany/landsberg_lech/dat.txt", 0.9, 4
# file_path, bandwidth= "./data/NCDC/europe/germany/neuburg/dat.txt", 0.7
# file_path, bandwidth= "./data/NCDC/europe/germany/laupheim/dat.txt", 0.7 # double peak, 4?, trend
# file_path, bandwidth= './data/NCDC/europe/germany/niederstetten/dat.txt', 0.9 # get the peak
# file_path, bandwidth= "./data/NCDC/europe/germany/holzdorf/dat.txt", 0.9 # 2008 year
# file_path, bandwidth, NUMBER_OF_GAUSSIAN= './data/NCDC/europe/france/nantes/dat.txt', 0.9, 4 # unit shift, one direction deviate big
# file_path, convert_to_knot= './data/NCDC/europe/france/pau_pyrenees/dat.txt', True # unit shift, 2; force using knot
# file_path= "./data/NCDC/europe/france/avord/dat.txt" # try 4, initial speed (should be good with m/s), incompete dataset
# file_path= "./data/NCDC/europe/france/vatry/dat.txt"  # double peak, initial speed, incompete dataset
# file_path, bandwidth, NUMBER_OF_GAUSSIAN= "./data/NCDC/europe/spain/valladolid/dat.txt", 1.1, 4
# file_path= './data/NCDC/europe/spain/jerez/dat.txt' # high 0
# file_path, bandwidth= "./data/NCDC/europe/spain/barayas/dat.txt", 0.7 # not good fit
# file_path, bandwidth= './data/NCDC/europe/spain/malaga/dat.txt', 0.7 # directions blocked?
# file_path, bandwidth= './data/NCDC/europe/spain/tenerife_sur/dat.txt', 0.7 # directions blocked?
# file_path, bandwidth= './data/NCDC/europe/spain/almeria/dat.txt', 0.7 # negative dimensions?
# file_path, bandwidth= './data/NCDC/europe/greece/eleftherios_intl/dat.txt',0.7 # some direction might be blocked
# file_path= './data/NCDC/europe/ciampino/dat.txt' # try 4, bandwidth?
# file_path= "./data/NCDC/europe/huspel_aws/dat.txt"  # integer, 4?
# file_path= './data/NCDC/gibraltar/dat.txt' # bad fit

# MidEast
# file_path, bandwidth= './data/NCDC/mideast/uae/al_maktoum/dat.txt', 1.1
# file_path= './data/NCDC/mideast/uae/sharjah_intl/dat.txt'
# file_path= './data/NCDC/mideast/uae/dubai_intl/dat.txt'
# file_path= './data/NCDC/mideast/uae/abu_dhabi_intl/dat.txt' # Time shift
# file_path= './data/NCDC/mideast/uae/bateen/dat.txt' # Time shift
# file_path= './data/NCDC/mideast/buraimi/dat.txt' # not good dataset
# file_path= './data/NCDC/mideast/turkey/konya/dat.txt'
# file_path= './data/NCDC/mideast/turkey/sivas/dat.txt' # bad dataset
# file_path= './data/NCDC/mideast/turkey/balikesir/dat.txt' # bad dataset
# file_path= './data/NCDC/mideast/turkey/bartin/dat.txt' # bad dataset
# file_path= './data/NCDC/mideast/iran/chahbahar/dat.txt'
# file_path= './data/NCDC/mideast/iran/zabol/dat.txt' # Problematic data
# file_path= './data/NCDC/mideast/iran/torbat_heydarieh/dat.txt' # Unusable

# file_path, bandwidth = "./data/NCDC/cn/shanghai/hongqiao_intl/dat.txt", 0.6
# file_path, bandwidth= "./data/NCDC/cn/shanghai/pudong/dat.txt", 0.8
# file_path, bandwidth= "./data/NCDC/cn/hefei_luogang/dat.txt", 0.6 # few 0, trend
# file_path, bandwidth= "./data/NCDC/cn/nanjing_lukou/dat.txt", 0.5
# file_path= "./data/NCDC/cn/zhengzhou_xinzheng/dat.txt"
# file_path= "./data/NCDC/cn/tianjin/binhai/dat.txt" # few 0, trend, stationary speed, unstationary direction
# file_path= "./data/NCDC/cn/tianjin/tianjing/dat.txt" # 16 sectors
# file_path= "./data/NCDC/cn/shijiazhuang_zhengding/dat.txt"
# file_path= "./data/NCDC/cn/henan_gushi/dat.txt" # 16 sectors, fit not very good
# file_path= "./data/NCDC/cn/nanning_wuxu/dat.txt" # numpy priblem, unstationary speed
# file_path= './data/NCDC/cn/macau/dat.txt'
# file_path= "./data/NCDC/cn/hk_intl/dat.txt" # few 0
# file_path= './data/NCDC/cn/gaoqi/dat.txt'

# file_path= './data/NCDC/southeast_asia/malaysia/mersing/dat.txt' # 2 mode, paper comparison
# file_path= './data/NCDC/southeast_asia/malaysia/penang/dat.txt'
# file_path= './data/NCDC/southeast_asia/malaysia/butterworth/dat.txt' # 2 mode
# file_path= "./data/NCDC/southeast_asia/malaysia/bsultan_mahmud/dat.txt" # stable
# file_path= "./data/NCDC/southeast_asia/malaysia/bsultan_ismail/dat.txt" #
# file_path= "./data/NCDC/southeast_asia/singapore/changi/dat.txt" # trend, no 0, questionary data
# file_path= "./data/NCDC/southeast_asia/singapore/paya_lebar/dat.txt" # questionary data
# file_path= "./data/NCDC/southeast_asia/singapore/seletar/dat.txt"
# file_path= "./data/NCDC/east_asia/cheongju_intl/dat.txt" # 2005-2009  may have problem, fit is good; numpy problem
# file_path= "./data/NCDC/east_asia/daegu_ab/dat.txt" # recent 5 year may have problem, but fit is generally good; numpy problem

# file_path, bandwidth= "./data/NCDC/oceania/auckland_intl/dat.txt", 0.9  # Good data, double mode
# file_path= "./data/NCDC/oceania/brisbane_archerfield/dat.txt" # high 0, few data
# file_path= "./data/NCDC/oceania/narrandera/dat.txt" # high 0, few data
# file_path, bandwidth= "./data/NCDC/oceania/canberra/dat.txt", 0.7 # high 0, bad fit
# file_path, bandwidth, NUMBER_OF_GAUSSIAN= './data/NCDC/oceania/horsham/dat.txt', 0.9, 4 # get the peak

# file_path, bandwidth= './data/NCDC/us/boston_16nm/dat.txt', 0.9 # Offshore, mixed type

# file_path, bandwidth, NUMBER_OF_GAUSSIAN  = './data/asos/bismarck_ND/hr_avg.csv', 1.1, 4
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/asos/aberdeen_SD/hr_avg.csv', 1.7, 2 # only to 2012
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/asos/minneapolis/hr_avg.csv', 1.1, 4
# file_path, bandwidth = './data/asos/lincoln_NE/hr_avg.csv', 0.9
# file_path, bandwidth = './data/asos/des_moines_IA/hr_avg.csv', 1.3
# file_path, bandwidth = './data/asos/springfield_IL/hr_avg.csv', 1.1
# file_path, bandwidth = './data/asos/topeka/hr_avg.csv', 0.7 # High 0
# file_path, bandwidth = './data/asos/denver/hr_avg.csv', 1.3
# file_path, bandwidth= './data/asos/olympia/hr_avg.csv', 0.5 # might block

# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/NDAWN/baker/hr_avg.csv', 0.7, 4
# file_path, bandwidth = './data/NDAWN/dickinson/hr_avg.csv', 0.6
# file_path = './data/NDAWN/rugby/hr_avg.csv'
# file_path = './data/NDAWN/bowman/hr_avg.csv'
# file_path = './data/NDAWN/grand_forks/hr_avg.csv'
# file_path = './data/NDAWN/williston/hr_avg.csv'
# file_path = './data/NDAWN/jamestown/hr_avg.csv'

# file_path, bandwidth, NUMBER_OF_GAUSSIAN = 'data/ECMWF/usa/47N123W/dat.csv', 0.7, 4 #good
# file_path, bandwidth = 'data/ECMWF/venezuela/8N67W/dat.csv', 0.7 # good, but the data might be problematic.
# file_path, bandwidth = 'data/ECMWF/chile/52S75W/dat.csv', 1.9 # good
# file_path, bandwidth= 'data/ECMWF/iceland/65N17W/dat.csv', 1.9 # good
# file_path, bandwidth, NUMBER_OF_GAUSSIAN  = 'data/ECMWF/germany/49N9E/dat.csv', 0.9, 4 # good
# file_path, bandwdith = 'data/ECMWF/sudan/18N32E/dat.csv', 1.1 # good
# file_path, bandwidth = 'data/ECMWF/china/24N121E/dat.csv', 0.9 # good
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = 'data/ECMWF/australia/37S142E/dat.csv', 0.7, 4 # miss the peak, force bandwidth 0.7, default 0.9

In [3]:
# Load the raw data and normalise columns per data source (NCDC / NDAWN /
# ASOS / ECMWF), so every source ends up with date, HrMn, type, dir, speed,
# wind_type columns.
# NOTE(review): the notebook export lost cell indentation and, apparently, the
# file-reading statements (e.g. the body of the "cn_database" branch and the
# read_csv calls that create `df`). Structure below is reconstructed — confirm
# against the original notebook.
if "cn_database" in file_path:
    pass  # TODO(review): original loading code for the cn_database source is missing in this export
elif 'NCDC' in file_path:
    df.rename(columns={'Date':'date','Dir':'dir','Spd':'speed','Type':'type','I.1':'wind_type'}, inplace=True)
    df = df[['date','HrMn','type','dir','speed','wind_type' ]]
    df.dropna(subset=['dir','speed'], inplace=True)
    integer_data = True
elif 'NDAWN' in file_path:
    df['type']='default'
    df['wind_type']='default'
    df = df.dropna()
    convert_to_knot = False
    integer_data = False
elif 'asos' in file_path:
    # ASOS
    df['type']='default'
    df['wind_type']='default'
    df = df.dropna()
    convert_to_knot = False
    integer_data = False
    knot_unit = True
else:
    # ECMWF reanalysis: (U, V) components; flip sign so the vector points in
    # the direction the wind comes FROM, then derive speed/dir and timestamps.
    df.rename(columns={'U':'x','V':'y'}, inplace=True)
    df.x=-df.x
    df.y=-df.y
    df['speed']=np.sqrt(df.x**2+df.y**2)
    df['dir']=np.degrees(np.arctan2(df.y, df.x))%360
    # ECMWF time is hours since 1979-01-01T00:00:00Z.
    df['time']=pd.to_datetime('1979-01-01T00:00:00Z')+pd.to_timedelta(df['time'], unit='h')
    df['date']=df['time'].dt.strftime('%Y%m%d')
    df['date']=df['date'].astype(int)
    df['HrMn']=df['time'].dt.strftime('%H00')
    df['type']='default'
    df['wind_type']='default'
    convert_to_knot = True
    integer_data = False
    cartesian = True

In [4]:
# Preview the loaded dataframe (rich head/tail display).
df

Out[4]:
date HrMn type dir speed wind_type
0 19900101 0000 FM-12 999 0.0 C
1 19900101 0050 FM-15 999 0.0 C
2 19900101 0150 FM-15 999 0.0 C
3 19900101 0300 SY-MT 999 0.0 C
4 19900101 0400 FM-15 999 0.0 C
5 19900101 0450 FM-15 999 0.0 C
6 19900101 0600 FM-15 999 0.0 C
7 19900101 0650 FM-15 999 0.0 C
8 19900101 0750 FM-15 999 0.0 C
9 19900101 0900 FM-15 999 0.0 C
10 19900101 1050 FM-15 999 0.0 C
11 19900101 1150 FM-15 360 0.5 N
12 19900101 1250 FM-15 20 1.5 N
13 19900101 1400 FM-15 30 1.0 N
14 19900101 1450 FM-15 30 0.5 N
15 19900101 1600 FM-15 999 0.0 C
16 19900101 1630 FM-15 999 0.0 C
17 19900101 1650 FM-15 60 0.5 N
18 19900101 1750 FM-15 190 0.5 N
19 19900101 1900 FM-15 180 0.5 N
20 19900101 2000 FM-15 170 1.0 N
21 19900101 2100 SY-MT 170 0.5 N
22 19900101 2200 FM-15 180 0.5 N
23 19900101 2250 FM-15 190 1.0 N
24 19900101 2350 FM-15 180 0.5 N
25 19900102 0000 SY-MT 190 1.0 N
26 19900102 0150 FM-15 260 0.5 N
27 19900102 0250 FM-15 190 1.5 N
28 19900102 0300 FM-12 190 1.5 N
29 19900102 0400 FM-15 200 1.0 N
... ... ... ... ... ... ...
333361 20160131 2320 FM-15 250 9.8 N
333362 20160131 2346 FM-16 240 11.8 N
333363 20160201 0020 FM-15 240 12.9 N
333364 20160201 0120 FM-15 240 13.4 N
333365 20160201 0220 FM-15 240 13.4 N
333366 20160201 0320 FM-15 240 13.4 N
333367 20160201 0420 FM-15 240 13.4 N
333368 20160201 0520 FM-15 240 13.9 N
333369 20160201 0620 FM-15 250 14.9 N
333370 20160201 0625 FM-16 250 13.9 N
333371 20160201 0720 FM-15 250 14.9 N
333372 20160201 0820 FM-15 240 11.3 N
333373 20160201 0920 FM-15 240 11.8 N
333374 20160201 1020 FM-15 240 11.8 N
333375 20160201 1048 FM-16 240 11.8 N
333376 20160201 1120 FM-15 240 11.8 N
333377 20160201 1211 FM-16 240 9.3 N
333378 20160201 1220 FM-15 240 12.4 N
333379 20160201 1320 FM-15 250 12.4 N
333380 20160201 1420 FM-15 250 12.9 N
333381 20160201 1520 FM-15 240 12.4 N
333382 20160201 1604 FM-16 240 9.3 N
333383 20160201 1620 FM-15 240 10.3 N
333384 20160201 1720 FM-15 240 6.7 N
333385 20160201 1820 FM-15 270 7.2 N
333386 20160201 1920 FM-15 260 6.7 N
333387 20160201 2020 FM-15 260 8.8 N
333388 20160201 2120 FM-15 250 9.8 N
333389 20160201 2220 FM-15 250 8.8 N
333390 20160201 2320 FM-15 240 9.8 N

333391 rows × 6 columns

In [5]:
# For NCDC stations, look up the station coordinates and show them on a map.
# (Indentation restored — the export flattened this cell.)
if 'NCDC' in file_path:
    lat, long = get_lat_long(file_path)
    print(lat,long)
    map_osm = folium.Map(location=[lat, long], zoom_start=4)
    display(map_osm)

48.071 10.906

In [6]:
# Build a DatetimeIndex from date + HrMn and restrict to plausible records.
# dir == 999 is the missing-direction marker and is intentionally kept here.
# Fix: the original had a redundant `.map(str)` after `.astype(str)`.
df['time'] = pd.to_datetime(df["date"].astype(str) + df["HrMn"], format='%Y%m%d%H%M')
df.set_index(['time'], inplace=True)
df['HrMn'] = df['HrMn'].astype(int)
df = df.query("(dir <= 999) & (speed < 100) ")['1970':'2016']

In [7]:
# Overview histograms of wind speed and direction (helper from import_file).
plot_speed_and_angle_distribution(df.speed, df.dir)

D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
warnings.warn(self.msg_depr % (key, alt_key))

In [8]:
# Dir [10,360] => [0,350]
df['dir'] = df['dir'].apply(lambda x: x % 360 if x < 999 else x)
# Convert windrose coordinates to polar coordinates; 999 (missing) passes
# through unchanged. (Indentation restored — the export flattened this cell;
# nesting of the two branches reconstructed, confirm against the original.)
if 'cartesian' in globals():
    # ECMWF data already produced a polar 'dir'; derive the windrose view.
    df['dir_windrose'] = df['dir'].apply(lambda x: (90 - x) % 360 if x < 999 else x)
else:
    # Station data is recorded in windrose convention; keep it, convert 'dir'.
    df['dir_windrose'] = df['dir']
    df['dir'] = df['dir'].apply(lambda x: (90 - x) % 360 if x < 999 else x)
display(df.describe())
df.plot(y='speed',legend=True,figsize=(20,5))

date HrMn dir speed dir_windrose
count 3.332600e+05 333260.000000 333260.000000 333260.000000 333260.000000
mean 2.003340e+07 1166.548146 206.404069 3.225488 208.309536
std 7.020275e+04 687.794956 169.049772 2.319920 167.356881
min 1.990010e+07 0.000000 0.000000 0.000000 0.000000
25% 1.998072e+07 600.000000 110.000000 1.500000 110.000000
50% 2.004020e+07 1120.000000 200.000000 2.600000 210.000000
75% 2.009082e+07 1720.000000 250.000000 4.100000 250.000000
max 2.016020e+07 2359.000000 999.000000 34.500000 999.000000
Out[8]:
<matplotlib.axes._subplots.AxesSubplot at 0xcc40390>

## 1.3 General Data Info¶

### 1.3.1 Unit Detection¶

In [9]:
# Unit detection: if a large share of speeds have non-trivial decimals, the
# values were likely converted from knots — convert back so the data is in
# its native (integer-friendly) unit. `convert_to_knot` may be preset above.
# (Indentation restored — the export flattened this cell.)
df['decimal'] = df.speed % 1
df.decimal.hist(alpha=0.5, label='m/s', figsize=(4, 3))

if 'convert_to_knot' not in globals():
    # >30% of records with decimal part >= 0.2 suggests a m/s-from-knot conversion.
    convert_to_knot = len(df.query('decimal >= 0.2')) / len(df) > 0.3

if convert_to_knot:
    knot_unit = True
    df['speed'] = df['speed'] * 1.943845  # m/s -> knot
    df['decimal'] = df.speed % 1
    df.decimal.hist(alpha=0.5, label='knot')
    # need more elaboration, some is not near an integer
    if integer_data:
        df['speed'] = df['speed'].apply(lambda x: int(round(x)))
    plt_configure(xlabel='Decimal', ylabel='Frequency', legend={'loc': 'best'}, title='Decimal Distribution')
else:
    if 'knot_unit' not in globals():
        knot_unit = False

# drop(columns=...) instead of the deprecated positional axis argument.
df.drop(columns=['decimal'], inplace=True)
print(knot_unit)

True

In [10]:
# Axis-label suffixes for later plots; knot_unit was set during unit detection.
dir_unit_text = ' (degree)'
if knot_unit:  # idiomatic truth test instead of `== True`
    speed_unit_text = ' (knot)'
else:
    speed_unit_text = ' (m/s)'


### 1.3.2 Sampling Type Selection¶

In [11]:
# Keep only the most common report type among post-2000 records, so sampling
# characteristics are homogeneous.
sample_type = df.query('date > 20000000')['type']
sample_type.value_counts().plot(
    kind = 'bar', title = 'Report Types Comprisement', figsize=(4,3))

# idxmax() returns the LABEL of the most frequent type; Series.argmax here is
# deprecated/ambiguous (positional vs label) in modern pandas.
report_type_most_used = sample_type.value_counts().idxmax()
df = df.query("type==@report_type_most_used")


### 1.3.3 Sampling Time Selection¶

In [12]:
# Split the record at the dataset's middle year and compare sampling-time
# (HrMn) frequency before/after, to spot changes in the sampling schedule.
MID_YEAR = int(np.average(df.index.year))

df['HrMn'].value_counts().sort_index().plot(kind='bar', alpha=0.5,label='Overall')
df[str(MID_YEAR):]['HrMn'].value_counts().sort_index().plot(
kind='bar', alpha=0.5, label='>= %s' %  MID_YEAR )

plt_configure(xlabel='Sampling Time', ylabel='Frequency', legend={'loc':'best'}, figsize=(8, 4),
title = 'Sampling Time Distribution, Overall and > %s ' %  MID_YEAR)

In [13]:
# Keep only records taken at the dominant sampling minute (the minute-of-hour
# with > 2000 records since 2000), so the series is evenly sampled.
df['sample_time'] = df.HrMn % 100
sample_time = df['2000':]['sample_time']
sample_times = sample_time.value_counts()[sample_time.value_counts() > 2000]
sample_times = sample_times.index.tolist()
# df = df.query("sample_time in @sample_times")
df = df.query("sample_time == @sample_times[0]")
# drop(columns=...) instead of the deprecated positional axis argument.
df.drop(columns=['sample_time'], inplace=True)
print(sample_times)

df['HrMn'].value_counts().sort_index().plot(kind='bar', alpha=0.5, figsize=(10, 4))

[20]

Out[13]:
<matplotlib.axes._subplots.AxesSubplot at 0xce09dd8>

## 1.4 Error Data handling and Adjustment¶

### 1.4.1 Artefacts¶

wrong direction record

In [14]:
# Integer-coded datasets record direction in 10-degree steps; anything else
# (except the 999 missing marker) is an artefact — display it, then drop it.
# (Indentation restored — the export flattened this cell.)
if integer_data:
    display(df.query("(dir % 10 >= 0.1) & (dir != 999)"))
    df = df.query('(dir % 10 <= 0.1) | (dir == 999)')

date HrMn type dir speed wind_type dir_windrose
time
1994-03-22 18:20:00 19940322 1820 FM-15 176 4 N 274
1996-04-18 17:20:00 19960418 1720 FM-15 88 4 N 2
1998-02-05 13:20:00 19980205 1320 FM-15 87 2 N 3
1998-11-11 00:20:00 19981111 20 FM-15 159 14 N 291
2000-02-20 15:20:00 20000220 1520 FM-15 157 7 N 293
2000-05-20 08:20:00 20000520 820 FM-15 193 13 N 257
2000-10-27 00:20:00 20001027 20 FM-15 229 7 N 221
2000-12-06 01:20:00 20001206 120 FM-15 85 3 N 5
2001-03-02 12:20:00 20010302 1220 FM-15 221 4 N 229
2001-05-21 08:20:00 20010521 820 FM-15 29 34 N 61
2002-03-16 16:20:00 20020316 1620 FM-15 32 8 N 58
2002-04-06 03:20:00 20020406 320 FM-15 83 5 N 7
2002-06-09 17:20:00 20020609 1720 FM-15 206 4 N 244
2002-08-13 09:20:00 20020813 920 FM-15 159 2 N 291
2002-12-30 02:20:00 20021230 220 FM-15 198 25 N 252
2003-01-27 05:20:00 20030127 520 FM-15 209 2 N 241
2003-02-09 03:20:00 20030209 320 FM-15 85 3 N 5
2003-02-21 16:20:00 20030221 1620 FM-15 1 4 N 89
2003-12-06 08:20:00 20031206 820 FM-15 163 14 N 287
2005-07-24 10:20:00 20050724 1020 FM-15 195 5 N 255
2005-11-25 15:20:00 20051125 1520 FM-15 239 8 N 211
2005-12-31 11:20:00 20051231 1120 FM-15 216 14 N 234
2006-01-13 05:20:00 20060113 520 FM-15 229 2 N 221
2006-03-10 02:20:00 20060310 220 FM-15 209 67 N 241
2006-03-10 03:20:00 20060310 320 FM-15 209 67 N 241
2006-12-04 00:20:00 20061204 20 FM-15 178 28 N 272

sudden increase in speed

In [15]:
# Forward and backward first differences of speed, used to flag sudden,
# isolated jumps (likely sensor glitches). NaNs at the series edges become 0.
df['incre'] = df.speed.diff(1)
df['incre'] = df['incre'].fillna(0)
df['incre_reverse'] = df.speed.diff(-1)
df['incre_reverse'] = df['incre_reverse'].fillna(0)

df['incre'].plot(kind='hist', bins=arange(-15, 15), legend=True, figsize=(8, 3))

date HrMn type dir speed wind_type dir_windrose incre incre_reverse
time
2006-10-13 22:20:00 20061013 2220 FM-15 60 53 N 30 50.0 48.0
2015-11-15 11:20:00 20151115 1120 FM-15 200 52 N 250 27.0 22.0
2001-06-17 00:20:00 20010617 20 FM-15 270 47 N 180 42.0 39.0
1998-02-14 17:20:00 19980214 1720 FM-15 40 46 N 50 40.0 43.0
2007-01-25 03:20:00 20070125 320 FM-15 150 45 N 300 40.0 42.0
2002-10-03 03:20:00 20021003 320 FM-15 350 44 N 100 40.0 39.0
2006-06-14 06:20:00 20060614 620 FM-15 250 44 N 200 40.0 41.0
2006-04-29 00:20:00 20060429 20 FM-15 170 42 N 280 40.0 39.0
2005-12-21 17:20:00 20051221 1720 FM-15 270 42 N 180 40.0 38.0
1995-01-26 22:20:00 19950126 2220 FM-15 200 40 N 250 18.0 13.0
Out[15]:
<matplotlib.axes._subplots.AxesSubplot at 0xcd463c8>
In [16]:
# A record is a "sudden increase" when speed jumps by more than the threshold
# both relative to the previous AND the next record; keep everything else.
incre_threshold = 20 if knot_unit else 10
print('sudden increase number', len(df.query('(incre > @incre_threshold )&(incre_reverse > @incre_threshold )')))
df = df.query('(incre < @incre_threshold )|(incre_reverse < @incre_threshold )')

# Check the max speed
# drop(columns=...) instead of the deprecated positional axis argument.
df.drop(columns=['incre', 'incre_reverse'], inplace=True)

sudden increase number 17

date HrMn type dir speed wind_type dir_windrose incre incre_reverse
time
1995-01-26 22:20:00 19950126 2220 FM-15 200 40 N 250 18.0 13.0
1999-12-26 13:20:00 19991226 1320 FM-15 190 39 N 260 6.0 1.0
2015-03-31 09:20:00 20150331 920 FM-15 200 39 N 250 1.0 4.0
2015-03-31 11:20:00 20150331 1120 FM-15 190 39 N 260 4.0 2.0
2015-03-31 08:20:00 20150331 820 FM-15 210 38 N 240 1.0 -1.0
2005-12-16 14:20:00 20051216 1420 FM-15 210 38 N 240 6.0 4.0
1999-12-26 14:20:00 19991226 1420 FM-15 190 38 N 260 -1.0 8.0
2007-01-18 22:20:00 20070118 2220 FM-15 210 38 N 240 3.0 6.0
2015-03-31 14:20:00 20150331 1420 FM-15 200 38 N 250 2.0 6.0
2003-01-02 20:20:00 20030102 2020 FM-15 200 38 N 250 5.0 8.0

### 1.4.2 Direction re-aligment¶

For some datasets, the 16 sectors are not recorded properly,

e.g. the sectors are [0, 20, 50, ...]; the angles need to be redistributed into 22.5° sectors, e.g. [0, 22.5, 45, ...]

In [17]:
# Determine the sector width from the directions that occur often enough
# (> 30 records); integer datasets define the sector grid, continuous ones
# use a fixed 10-degree sector. (Indentation restored after export.)
display(df['dir'].value_counts().sort_index())
effective_column = df.query('dir < 999')['dir'].value_counts()[df['dir'].value_counts() > 30].sort_index()
if integer_data:
    SECTOR_LENGTH = 360/len(effective_column)
else:
    SECTOR_LENGTH = 10
print(len(effective_column), SECTOR_LENGTH)

0       2604
10      3772
20      4141
30      4787
40      3730
50      3786
60      3238
70      3294
80      2445
90      2218
100     1621
110     1844
120     1687
130     1865
140     1811
150     2383
160     2799
170     4118
180     4730
190     6997
200     9256
210    12291
220     9079
230     7033
240     4321
250     4491
260     4051
270     4781
280     3931
290     4075
300     3375
310     3279
320     2623
330     2466
340     2004
350     2434
999     4194
Name: dir, dtype: int64
36 10.0

In [18]:
# Snap recorded directions onto the regular sector grid (helper from import_file).
df=realign_direction(df, effective_column)


### 1.4.3 0 Speed¶

In [19]:
# Decide (based on data from 2005 on) whether calm/zero-speed records are so
# frequent that they should be dropped. (Indentation restored after export.)
with_too_many_zero, null_wind_frequency = is_with_too_many_zero(df['2005':])
delete_zero = with_too_many_zero
if delete_zero:
    df = df.query('(speed > 0)')
print(delete_zero, null_wind_frequency)

False 0.00677323785894

In [20]:
# Speeds of records with missing direction (999) — mostly near-calm — then
# impute directions for them (helper from import_file).
print(df.query('dir == 999')['speed'].value_counts())
df=fill_direction_999(df, SECTOR_LENGTH)

2    1718
1    1129
0     945
3     395
4       5
9       1
5       1
Name: speed, dtype: int64


## 1.5 Time Shift Comparison¶

In [21]:
# Direction histogram bins: 'even' centres sectors on 0,10,... ([-5,5) etc.),
# 'round_up' uses [0,10) edges. (Indentation restored after export.)
DIR_REDISTRIBUTE = 'even'
if DIR_REDISTRIBUTE == 'even':
    DIR_BIN = arange(-5, 360, 10)
elif DIR_REDISTRIBUTE == 'round_up':
    DIR_BIN = arange(0, 360+10, 10)

# Comparison between mid_year, looking for:
# 1. Odd Even Bias
# 2. Time Shift of Wind Speed Distribution
bins = arange(0, df.speed.max() + 1)
df[:str(MID_YEAR)]['speed'].plot(
    kind='hist', alpha=0.5,bins=bins, label='< %s' % MID_YEAR)

df[str(MID_YEAR+1):]['speed'].plot(
    kind='hist', alpha=0.5,bins=bins, label='> %s' % MID_YEAR)

plt.suptitle('Speed Comparison between year < %s, > %s ' % (MID_YEAR, MID_YEAR), fontsize = 14)
plt_configure(xlabel='Speed', ylabel='Frequency', legend=True, figsize=(8, 3))

In [22]:
# Same early/late comparison as above, but for direction histograms.
df[:str(MID_YEAR)]['dir'].plot(
kind='hist', alpha=0.5,bins=DIR_BIN, label='< %s' % MID_YEAR)

df[str(MID_YEAR+1):]['dir'].plot(
kind='hist', alpha=0.5,bins=DIR_BIN, label='> %s' % MID_YEAR)

plt.suptitle('Dir Comparison between year < %s, and > %s ' % (MID_YEAR, MID_YEAR), fontsize = 14)
plt_configure(xlabel='Dir', ylabel='Frequency', legend={'loc':'best'}, figsize=(8, 3), tight='x')

In [23]:
# Show any records whose direction became NaN during preprocessing, then drop them.
display(df[df['dir'].isnull()])
df.dropna(subset=['dir'], inplace=True)

date HrMn type dir speed wind_type dir_windrose
time
In [24]:
# Inspect the time shift of speed and degree distribution, and odd-even bias
# (helper plots per multi-year period, judging by the period labels in the output).
check_time_shift(df, speed_unit_text=speed_unit_text, dir_unit_text=dir_unit_text)

1993 - 1995

D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
warnings.warn(self.msg_depr % (key, alt_key))

1996 - 2000

2001 - 2005

2006 - 2010

2011 - 2015

In [25]:
# Annual and monthly mean wind speed — a quick visual check for long-term trend.
ax_yearly = df.resample('A').mean().plot(y='speed', figsize=(4,3))
ax_yearly.set_ylim(bottom=0)
ax_monthly = df.resample('M').mean().plot(y='speed', figsize=(20,4))
ax_monthly.set_ylim(bottom=0)

Out[25]:
(0, 22.5)
In [26]:
%%time
for column in ['speed', 'dir']:
if column == 'speed':
bins = arange(0, df[column].max()+1, 1)
else:
bins = arange(0, 361, 10)
den, _ = np.histogram(df[column], bins=bins, density=True)
y_top=max(den)*1.2
for year in arange(1980, 2016):
end_year = year
sub_df = df[str(year):str(end_year)]
if len(sub_df) > 1000:
plt.figure()
df[column].hist(bins=bins, alpha=0.3, normed=True)
sub_df[column].hist(bins=bins, alpha=0.5, figsize=(3,1.5), normed=True)
plt.gca().set_ylim(top=y_top)
plt_configure(title=str(year))
align_figures()

Wall time: 13.7 s

In [27]:
# Stationarity check: R-square of each rolling 3-year window's histogram
# against the overall distribution, for speed and direction.
# (Indentation restored — the export flattened this cell; `align_figures()`
# placed after the outer loop, confirm against the original.)
for column in ['speed', 'dir']:
    if column == 'speed':
        bins = arange(0, df[column].max()+1, 1)
    else:
        bins = arange(0, 361, 10)
    density_all, _ = np.histogram(df[column], bins=bins, density=True)
    df[column].hist(bins=bins, figsize=(5,3))

    R_squares = []
    years = []
    for year in arange(1980, 2016):
        start_year, end_year = year-1, year+1
        sub_df = df[str(start_year):str(end_year)]
        if len(sub_df) > 1000:
            density, _ = np.histogram(sub_df[column], bins=bins, density=True)
            y_mean = np.mean(density_all)
            SS_tot = np.sum(np.power(density_all - y_mean, 2))
            SS_res = np.sum(np.power(density_all - density, 2))

            R_square = 1 - SS_res / SS_tot
            R_squares.append(R_square)
            years.append(year)

    plt.figure()
    plot(years, R_squares)
    # Clamp the lower y-limit to [0, 0.85] so small fluctuations stay visible.
    ylim = max(min(plt.gca().get_ylim()[0],0.85),0)
    plt.gca().set_ylim(bottom=ylim, top=1)
    plt_configure(figsize=(5,3))
align_figures()


## 1.6 Re-distribute Direction and Speed (Optional)¶

e.g. Dir 50 -> 45 ~ 55, to make the KDE result better

In [28]:
# Spread integer-coded directions uniformly within each sector so the KDE is
# smooth. (Indentation restored — the export flattened this cell.)
if integer_data:
    df = randomize_angle(df, DIR_REDISTRIBUTE, SECTOR_LENGTH)

In [29]:
# Spread rounded integer speeds within a unit interval; redistribute downward
# when calm (0-speed) records were deleted, upward otherwise.
# (Indentation restored — the export flattened this cell; the inner if/else
# is assumed nested under `integer_data`, confirm against the original.)
if integer_data:
    if delete_zero:
        redistribute_method = 'down'
    else:
        redistribute_method = 'up'

    df, speed_redistribution_info = randomize_speed(df, redistribute_method)

Redistribute upward, e.g. 0 -> [0,1]


## 1.7 Generate (x,y) from (speed,dir)¶

In [30]:
# Cook orientation
# df['dir']= (df['dir'] + 180)%360

In [31]:
# There might be a small dot in the centre, which is due to too many zero (more than 1 speed) in center
# Scatter plot in matplot has performance issue, the speed is very slow
# Polar (speed, dir) -> Cartesian (x, y); dir is in degrees, already in polar
# orientation. A small dot at the origin in scatter plots comes from the many
# zero-speed records; matplotlib scatter is also slow on this many points.
for component, trig in (('x', cos), ('y', sin)):
    df[component] = df['speed'] * trig(df['dir'] * pi / 180.0)


# 2. Re-select Data and Overview¶

## 2.1 Data Overview¶

In [32]:
## Summery of the data selection
print('Knot unit?', knot_unit)
print('Report type used:', report_type_most_used)
print('Sampling time used:', sample_times)
if 'speed_redistribution_info&