Modeling the Joint Distribution of Wind Speed and Direction using Gaussian Mixture Models¶

Method: Harris & Cook, "The parent wind speed distribution: Why Weibull?" http://www.sciencedirect.com/science/article/pii/S0167610514001056

Gaussian Mixture Models, http://scikit-learn.org/stable/modules/mixture.html

1. Set up¶

1.1 Environment¶

In [1]:
%matplotlib inline

from import_file import *
from helpers.parallel_helper import *

plt.rcParams['axes.autolimit_mode'] = 'round_numbers'
plt.rcParams['axes.xmargin'] = 0.
plt.rcParams['axes.ymargin'] = 0.
mpl.rcParams['patch.force_edgecolor'] = True


In [2]:
# Dataset selection: uncomment exactly one `file_path` line below.
# Optional companions: `bandwidth` (KDE), `NUMBER_OF_GAUSSIAN`, `convert_to_knot`.
# file_path, bandwidth= './data/NCDC/europe/uk/marham/dat.txt', 1.7
# file_path, bandwidth= './data/NCDC/europe/uk/tiree/dat.txt', 1.9
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/NCDC/europe/uk/boscombe_down/dat.txt', 1.5, 4
# file_path, bandwidth= './data/NCDC/europe/uk/middle_wallop/dat.txt', 1.3
# file_path, bandwidth= './data/NCDC/europe/uk/bournemouth/dat.txt',1.3 # 4?
# file_path= "./data/NCDC/europe/uk/weybourne/dat.txt"
# file_path= "./data/NCDC/europe/uk/skye_lusa/dat.txt" #
# file_path= "./data/NCDC/europe/uk/wattisham/dat.txt"
# file_path= "./data/NCDC/europe/uk/south_uist_range/dat.txt" # improper direction R square measure
# file_path= "./data/NCDC/europe/uk/holbeach/dat.txt" # improper direction R square measure
# file_path= "./data/NCDC/europe/uk/cambridge/dat.txt" # improper direction R square measure
# file_path= "./data/NCDC/europe/us/baltimore/dat.txt" # time too short
# file_path= "./data/NCDC/europe/uk/bealach_na_ba/dat.txt" # time too short
# file_path= "./data/NCDC/europe/uk/benbecula/dat.txt" # truncate (untruncate in m/s), 4?
# file_path= './data/NCDC/europe/uk/southhamption/dat.txt' # high 0, trend

# file_path, bandwidth, NUMBER_OF_GAUSSIAN = "./data/NCDC/europe/germany/landsberg_lech/dat.txt", 0.9, 4
# file_path, bandwidth= "./data/NCDC/europe/germany/neuburg/dat.txt", 0.7
# file_path, bandwidth= "./data/NCDC/europe/germany/laupheim/dat.txt", 0.7 # double peak, 4?, trend
# file_path, bandwidth= './data/NCDC/europe/germany/niederstetten/dat.txt', 0.9 # get the peak
# file_path, bandwidth= "./data/NCDC/europe/germany/holzdorf/dat.txt", 0.9 # 2008 year
# file_path, bandwidth, NUMBER_OF_GAUSSIAN= './data/NCDC/europe/france/nantes/dat.txt', 0.9, 4 # unit shift, one direction deviate big
# file_path, convert_to_knot= './data/NCDC/europe/france/pau_pyrenees/dat.txt', True # unit shift, 2; force using knot
# file_path= "./data/NCDC/europe/france/avord/dat.txt" # try 4, initial speed (should be good with m/s), incomplete dataset
# file_path= "./data/NCDC/europe/france/vatry/dat.txt"  # double peak, initial speed, incomplete dataset
# file_path, bandwidth, NUMBER_OF_GAUSSIAN= "./data/NCDC/europe/spain/valladolid/dat.txt", 1.1, 4
# file_path= './data/NCDC/europe/spain/jerez/dat.txt' # high 0
# file_path, bandwidth= "./data/NCDC/europe/spain/barayas/dat.txt", 0.7 # not good fit
# file_path, bandwidth= './data/NCDC/europe/spain/malaga/dat.txt', 0.7 # directions blocked?
# file_path, bandwidth= './data/NCDC/europe/spain/tenerife_sur/dat.txt', 0.7 # directions blocked?
# file_path, bandwidth= './data/NCDC/europe/spain/almeria/dat.txt', 0.7 # negative dimensions?
# file_path, bandwidth= './data/NCDC/europe/greece/eleftherios_intl/dat.txt',0.7 # some direction might be blocked
# file_path= './data/NCDC/europe/ciampino/dat.txt' # try 4, bandwidth?
# file_path= "./data/NCDC/europe/huspel_aws/dat.txt"  # integer, 4?
# file_path= './data/NCDC/gibraltar/dat.txt' # bad fit

# MidEast
file_path, bandwidth= './data/NCDC/mideast/uae/al_maktoum/dat.txt', 1.1
# file_path= './data/NCDC/mideast/uae/sharjah_intl/dat.txt'
# file_path= './data/NCDC/mideast/uae/dubai_intl/dat.txt'
# file_path= './data/NCDC/mideast/uae/abu_dhabi_intl/dat.txt' # Time shift
# file_path= './data/NCDC/mideast/uae/bateen/dat.txt' # Time shift
# file_path= './data/NCDC/mideast/buraimi/dat.txt' # not good dataset
# file_path= './data/NCDC/mideast/turkey/konya/dat.txt'
# file_path= './data/NCDC/mideast/turkey/sivas/dat.txt' # bad dataset
# file_path= './data/NCDC/mideast/turkey/balikesir/dat.txt' # bad dataset
# file_path= './data/NCDC/mideast/turkey/bartin/dat.txt' # bad dataset
# file_path= './data/NCDC/mideast/iran/chahbahar/dat.txt'
# file_path= './data/NCDC/mideast/iran/zabol/dat.txt' # Problematic data
# file_path= './data/NCDC/mideast/iran/torbat_heydarieh/dat.txt' # Unusable

# file_path, bandwidth = "./data/NCDC/cn/shanghai/hongqiao_intl/dat.txt", 0.6
# file_path, bandwidth= "./data/NCDC/cn/shanghai/pudong/dat.txt", 0.8
# file_path, bandwidth= "./data/NCDC/cn/hefei_luogang/dat.txt", 0.6 # few 0, trend
# file_path, bandwidth= "./data/NCDC/cn/nanjing_lukou/dat.txt", 0.5
# file_path= "./data/NCDC/cn/zhengzhou_xinzheng/dat.txt"
# file_path= "./data/NCDC/cn/tianjin/binhai/dat.txt" # few 0, trend, stationary speed, unstationary direction
# file_path= "./data/NCDC/cn/tianjin/tianjing/dat.txt" # 16 sectors
# file_path= "./data/NCDC/cn/shijiazhuang_zhengding/dat.txt"
# file_path= "./data/NCDC/cn/henan_gushi/dat.txt" # 16 sectors, fit not very good
# file_path= "./data/NCDC/cn/nanning_wuxu/dat.txt" # numpy problem, unstationary speed
# file_path= './data/NCDC/cn/macau/dat.txt'
# file_path= "./data/NCDC/cn/hk_intl/dat.txt" # few 0
# file_path= './data/NCDC/cn/gaoqi/dat.txt'

# file_path= './data/NCDC/southeast_asia/malaysia/mersing/dat.txt' # 2 mode, paper comparison
# file_path= './data/NCDC/southeast_asia/malaysia/penang/dat.txt'
# file_path= './data/NCDC/southeast_asia/malaysia/butterworth/dat.txt' # 2 mode
# file_path= "./data/NCDC/southeast_asia/malaysia/bsultan_mahmud/dat.txt" # stable
# file_path= "./data/NCDC/southeast_asia/malaysia/bsultan_ismail/dat.txt" #
# file_path= "./data/NCDC/southeast_asia/singapore/changi/dat.txt" # trend, no 0, questionable data
# file_path= "./data/NCDC/southeast_asia/singapore/paya_lebar/dat.txt" # questionable data
# file_path= "./data/NCDC/southeast_asia/singapore/seletar/dat.txt"
# file_path= "./data/NCDC/east_asia/cheongju_intl/dat.txt" # 2005-2009  may have problem, fit is good; numpy problem
# file_path= "./data/NCDC/east_asia/daegu_ab/dat.txt" # recent 5 year may have problem, but fit is generally good; numpy problem

# file_path, bandwidth= "./data/NCDC/oceania/auckland_intl/dat.txt", 0.9  # Good data, double mode
# file_path= "./data/NCDC/oceania/brisbane_archerfield/dat.txt" # high 0, few data
# file_path= "./data/NCDC/oceania/narrandera/dat.txt" # high 0, few data
# file_path, bandwidth= "./data/NCDC/oceania/canberra/dat.txt", 0.7 # high 0, bad fit
# file_path, bandwidth, NUMBER_OF_GAUSSIAN= './data/NCDC/oceania/horsham/dat.txt', 0.9, 4 # get the peak

# file_path, bandwidth= './data/NCDC/us/boston_16nm/dat.txt', 0.9 # Offshore, mixed type

# file_path, bandwidth= './data/asos/olympia/hr_avg.csv', 0.5 # might block
# file_path, bandwidth, NUMBER_OF_GAUSSIAN  = './data/asos/bismarck_ND/hr_avg.csv', 1.1, 4
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/asos/aberdeen_SD/hr_avg.csv', 1.7, 2 # only to 2012
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/asos/minneapolis/hr_avg.csv', 1.1, 4
# file_path, bandwidth = './data/asos/lincoln_NE/hr_avg.csv', 0.9
# file_path, bandwidth = './data/asos/des_moines_IA/hr_avg.csv', 1.3
# file_path, bandwidth = './data/asos/springfield_IL/hr_avg.csv', 1.1
# file_path, bandwidth = './data/asos/topeka/hr_avg.csv', 0.7 # High 0
# file_path, bandwidth = './data/asos/denver/hr_avg.csv', 1.3

# file_path, bandwidth, NUMBER_OF_GAUSSIAN = './data/NDAWN/baker/hr_avg.csv', 0.7, 4
# file_path, bandwidth = './data/NDAWN/dickinson/hr_avg.csv', 0.6
# file_path = './data/NDAWN/rugby/hr_avg.csv'
# file_path = './data/NDAWN/bowman/hr_avg.csv'
# file_path = './data/NDAWN/grand_forks/hr_avg.csv'
# file_path = './data/NDAWN/williston/hr_avg.csv'
# file_path = './data/NDAWN/jamestown/hr_avg.csv'

# file_path, bandwidth, NUMBER_OF_GAUSSIAN = 'data/ECMWF/usa/47N123W/dat.csv', 0.7, 4 #good
# file_path, bandwidth = 'data/ECMWF/venezuela/8N67W/dat.csv', 0.7 # good, but the data might be problematic.
# file_path, bandwidth = 'data/ECMWF/chile/52S75W/dat.csv', 1.9 # good
# file_path, bandwidth= 'data/ECMWF/iceland/65N17W/dat.csv', 1.9 # good
# file_path, bandwidth, NUMBER_OF_GAUSSIAN  = 'data/ECMWF/germany/49N9E/dat.csv', 1.1, 4 # miss peak
# file_path, bandwidth = 'data/ECMWF/sudan/18N32E/dat.csv', 1.1 # good
# file_path, bandwidth = 'data/ECMWF/china/24N121E/dat.csv', 0.9 # good
# file_path, bandwidth, NUMBER_OF_GAUSSIAN = 'data/ECMWF/australia/37S142E/dat.csv', 0.7, 4 # miss the peak, force bandwidth 0.7

In [3]:
# Normalise the raw dataset into the common schema used downstream:
#   date (int YYYYMMDD), HrMn (str HHMM), type, dir, speed, wind_type
# plus per-source flags: integer_data, convert_to_knot, knot_unit, cartesian.
# NOTE(review): `df` is expected to be loaded from `file_path` before this
# cell; the loading statements appear to have been lost in the export —
# confirm against the original notebook. Indentation is also reconstructed.
if "cn_database" in file_path:
    # FIXME(review): the body of this branch was lost in the notebook
    # export (the original line(s) are missing); restored as a no-op so
    # the cell is syntactically valid.
    pass
elif 'NCDC' in file_path:
    # NCDC text export: rename to the common column names.
    df.rename(columns={'Date':'date','Dir':'dir','Spd':'speed','Type':'type','I.1':'wind_type'}, inplace=True)
    df = df[['date','HrMn','type','dir','speed','wind_type' ]]
    df.dropna(subset=['dir','speed'], inplace=True)
    integer_data = True
elif 'NDAWN' in file_path:
    df['type']='default'
    df['wind_type']='default'
    df = df.dropna()
    convert_to_knot = False
    integer_data = False
elif 'asos' in file_path:
    # ASOS
    df['type']='default'
    df['wind_type']='default'
    df = df.dropna()
    convert_to_knot = False
    integer_data = False
    knot_unit = True
else:
    # ECMWF reanalysis: (U, V) wind components -> speed/direction.
    df.rename(columns={'U':'x','V':'y'}, inplace=True)
    # Flip sign: convert "wind towards" into "wind from" convention.
    df.x=-df.x
    df.y=-df.y
    df['speed']=np.sqrt(df.x**2+df.y**2)
    df['dir']=np.degrees(np.arctan2(df.y, df.x))%360
    # 'time' arrives as hours since the 1979-01-01 epoch.
    df['time']=pd.to_datetime('1979-01-01T00:00:00Z')+pd.to_timedelta(df['time'], unit='h')
    df['date']=df['time'].dt.strftime('%Y%m%d')
    df['date']=df['date'].astype(int)
    df['HrMn']=df['time'].dt.strftime('%H00')
    df['type']='default'
    df['wind_type']='default'
    convert_to_knot = True
    integer_data = False
    cartesian = True

In [4]:
df

Out[4]:
date HrMn type dir speed wind_type
0 20100518 0500 FM-15 999 1.0 V
1 20100518 0600 FM-15 999 1.0 V
2 20100518 0700 FM-15 330 2.1 N
3 20100527 0900 FM-15 190 5.1 N
4 20100614 0400 FM-15 170 7.2 N
5 20100614 0500 FM-15 160 6.2 N
6 20100614 0600 FM-15 170 5.1 N
7 20100614 0700 FM-15 180 2.6 N
8 20100614 0800 FM-15 999 1.5 V
9 20100614 1100 FM-15 310 7.2 N
10 20100614 1200 FM-15 340 7.7 N
11 20100614 1300 FM-15 330 8.2 N
12 20100614 1400 FM-15 320 6.2 N
13 20100614 1500 FM-15 320 2.6 N
14 20100614 1700 FM-15 340 2.6 N
15 20100614 1800 FM-15 360 3.1 N
16 20100614 1900 FM-15 90 2.6 N
17 20100614 2000 FM-15 100 3.6 N
18 20100614 2200 FM-15 110 2.1 N
19 20100614 2300 FM-15 120 2.1 N
20 20100615 0000 FM-15 140 1.0 N
21 20100615 0100 FM-15 150 2.1 N
22 20100615 0200 FM-15 130 0.5 N
23 20100615 0400 FM-15 150 2.6 N
24 20100615 0500 FM-15 160 6.2 N
25 20100615 0600 FM-15 160 4.1 N
26 20100615 0700 FM-15 170 3.1 N
27 20100615 0800 FM-15 999 2.1 V
28 20100615 0900 FM-15 999 1.0 V
29 20100615 1000 FM-15 340 8.2 N
... ... ... ... ... ... ...
61722 20170331 1800 FM-15 120 4.1 N
61723 20170331 1900 FM-15 120 3.6 N
61724 20170331 2000 FM-15 90 2.6 N
61725 20170331 2100 FM-15 110 3.1 N
61726 20170331 2200 FM-15 110 3.1 N
61727 20170331 2300 FM-15 130 3.1 N
61728 20170401 0000 FM-15 80 2.1 N
61729 20170401 0100 FM-15 120 2.1 N
61730 20170401 0200 FM-15 80 2.1 N
61731 20170401 0300 FM-15 40 2.1 N
61732 20170401 0400 FM-15 999 0.5 V
61733 20170401 0500 FM-15 310 1.5 N
61734 20170401 0600 FM-15 330 1.5 V
61735 20170401 0700 FM-15 999 1.0 V
61736 20170401 0800 FM-15 290 3.1 V
61737 20170401 0900 FM-15 330 3.1 V
61738 20170401 1000 FM-15 320 2.6 V
61739 20170401 1100 FM-15 330 2.6 V
61740 20170401 1200 FM-15 340 2.1 V
61741 20170401 1300 FM-15 340 6.2 N
61742 20170401 1400 FM-15 330 4.1 N
61743 20170401 1500 FM-15 330 4.1 N
61744 20170401 1600 FM-15 340 2.6 N
61745 20170401 1700 FM-15 20 1.0 N
61746 20170401 1800 FM-15 100 1.5 N
61747 20170401 1900 FM-15 90 3.1 N
61748 20170401 2000 FM-15 80 3.1 N
61749 20170401 2100 FM-15 60 2.6 N
61750 20170401 2200 FM-15 80 2.1 N
61751 20170401 2300 FM-15 90 3.1 N

61752 rows × 6 columns

In [5]:
# For NCDC stations the coordinates are embedded in the data file;
# show the station location on an OpenStreetMap map.
if 'NCDC' in file_path:
    lat, long = get_lat_long(file_path)
    print(lat,long)
    map_osm = folium.Map(location=[lat, long], zoom_start=4)
    display(map_osm)

24.886 55.172

In [6]:
# Build a DatetimeIndex from the integer date (YYYYMMDD) and the HHMM
# sample-time string, then keep plausible records in 1970-2016.
# (The original chained `.astype(str).map(str)` converted to str twice;
# a single astype(str) is sufficient.)
df['time']=pd.to_datetime(df["date"].astype(str) + df["HrMn"], format='%Y%m%d%H%M')
df.set_index(['time'], inplace=True)
df['HrMn']=df['HrMn'].astype(int)
# dir == 999 is the "variable direction" sentinel and is deliberately kept.
df = df.query("(dir <= 999) & (speed < 100) ")['1970':'2016']

In [7]:
# Quick look at the marginal speed and direction histograms (project helper).
plot_speed_and_angle_distribution(df.speed, df.dir)

D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
warnings.warn(self.msg_depr % (key, alt_key))

In [8]:
# Dir [10,360]=> [0,350]: fold 360 onto 0; 999 ("variable") is preserved.
df['dir'] = df['dir'].apply(lambda x: x%360 if x < 999 else x)
# Convert windrose coordinates (0 = N, clockwise) to polar coordinates
# (0 = E, counter-clockwise); (90 - x) % 360 maps between the two.
# NOTE(review): indentation was lost in the export; the grouping of the
# last conversion under the else-branch is reconstructed — confirm
# against the original notebook.
if 'cartesian' in globals():
    # ECMWF data: 'dir' is already polar, so derive the windrose form from it.
    df['dir_windrose'] = df['dir'].apply(lambda x: (90 - x)%360 if x < 999 else x)
else:
    df['dir_windrose'] = df['dir']
    df['dir'] = df['dir'].apply(lambda x: (90 - x)%360 if x < 999 else x)
display(df.describe())
df.plot(y='speed',legend=True,figsize=(20,5))

date HrMn dir speed dir_windrose
count 5.938300e+04 59383.000000 59383.000000 59383.000000 59383.000000
mean 2.013312e+07 1135.866376 226.156712 4.131822 237.997642
std 1.889311e+04 692.584200 210.592490 2.311919 208.489040
min 2.010052e+07 0.000000 0.000000 0.000000 0.000000
25% 2.012022e+07 500.000000 120.000000 2.600000 120.000000
50% 2.013101e+07 1100.000000 180.000000 3.600000 200.000000
75% 2.015052e+07 1700.000000 280.000000 5.700000 300.000000
max 2.016123e+07 2359.000000 999.000000 20.100000 999.000000
Out[8]:
<matplotlib.axes._subplots.AxesSubplot at 0xc27f940>

1.3 General Data Info¶

1.3.1 Unit Detection¶

In [9]:
# ----- Unit detection: decide whether speeds are stored in m/s or knots -----
# NOTE(review): indentation was lost in the export; nesting below is
# reconstructed — confirm against the original notebook.
df['decimal'] = df.speed % 1
df.decimal.hist(alpha=0.5, label='m/s', figsize=(4, 3))

# If many decimal parts are far from zero, the values were most likely
# converted from knots into m/s at some point.
if 'convert_to_knot' not in globals():
    convert_to_knot = True if len(df.query('decimal >= 0.2')) / len(df) > 0.3 else False

if convert_to_knot:
    knot_unit = True
    df['speed'] = df['speed'] * 1.943845  # m/s -> knot
    df['decimal'] = df.speed % 1
    df.decimal.hist(alpha=0.5, label='knot')
    # need more elaboration, some is not near an integer
    if integer_data:
        df['speed'] = df['speed'].apply(lambda x: int(round(x)))
    plt_configure(xlabel='Decimal', ylabel='Frequency', legend={'loc': 'best'}, title='Decimal Distribution')
else:
    if 'knot_unit' not in globals():
        knot_unit = False

df.drop(['decimal'], 1,inplace=True)
print(knot_unit)

True

In [10]:
# Unit suffixes appended to axis labels in later plots.
dir_unit_text = ' (degree)'
speed_unit_text = ' (knot)' if knot_unit else ' (m/s)'


1.3.2 Sampling Type Selection¶

In [11]:
# Keep only the dominant report type (post-2000) so sampling is homogeneous.
sample_type = df.query('date > 20000000')['type']
sample_type.value_counts().plot(
    kind = 'bar', title = 'Report Types Comprisement', figsize=(4,3))

# idxmax() returns the *label* of the largest count; Series.argmax is
# deprecated in modern pandas and returns a positional index instead.
report_type_most_used = sample_type.value_counts().idxmax()
df = df.query("type==@report_type_most_used")


1.3.3 Sampling Time Selection¶

In [12]:
# Compare the sampling-time histogram of the whole record with that of the
# second half, to spot changes in the sampling schedule over time.
MID_YEAR = int(np.average(df.index.year))

df['HrMn'].value_counts().sort_index().plot(kind='bar', alpha=0.5,label='Overall')
df[str(MID_YEAR):]['HrMn'].value_counts().sort_index().plot(
    kind='bar', alpha=0.5, label='>= %s' %  MID_YEAR )

plt_configure(xlabel='Sampling Time', ylabel='Frequency', legend={'loc':'best'}, figsize=(8, 4),
              title = 'Sampling Time Distribution, Overall and > %s ' %  MID_YEAR)

In [13]:
# Restrict to records taken at the dominant minute-of-hour (usually :00)
# so the series is uniformly hourly.
df['sample_time'] = df.HrMn % 100
sample_time = df['2000':]['sample_time']
# Keep only minute values with substantial support (> 2000 records since 2000).
sample_times = sample_time.value_counts()[sample_time.value_counts() > 2000]
sample_times = sample_times.index.tolist()
# df = df.query("sample_time in @sample_times")
df = df.query("sample_time == @sample_times[0]")
# Explicit `columns=` keyword: the bare positional axis argument to
# DataFrame.drop is deprecated in modern pandas.
df.drop(columns=['sample_time'], inplace=True)
print(sample_times)

df['HrMn'].value_counts().sort_index().plot(kind='bar', alpha=0.5, figsize=(10, 4))

[0]

Out[13]:
<matplotlib.axes._subplots.AxesSubplot at 0xc3edd30>

1.4 Error Data handling and Adjustment¶

1.4.1 Artefacts¶

wrong direction record

In [14]:
# Integer-degree datasets should contain only multiples of 10 degrees (or the
# 999 "variable" sentinel); show any other values as artefacts, then drop them.
if integer_data:
    display(df.query("(dir % 10 >= 0.1) & (dir != 999)"))
    df = df.query('(dir % 10 <= 0.1) | (dir == 999)')

date HrMn type dir speed wind_type dir_windrose
time

sudden increase in speed

In [15]:
# sudden increse
df['incre'] = df.speed.diff(1)
df['incre'].fillna(0, inplace=True)
df['incre_reverse'] = df.speed.diff(-1)
df['incre_reverse'].fillna(0, inplace=True)

df['incre'].plot(kind='hist', bins=arange(-15, 15), legend=True, figsize=(8, 3))

date HrMn type dir speed wind_type dir_windrose incre incre_reverse
time
2016-03-09 10:00:00 20160309 1000 FM-15 60 36 N 30 13.0 13.0
2012-04-14 00:00:00 20120414 0 FM-15 180 34 N 270 8.0 26.0
2015-02-21 06:00:00 20150221 600 FM-15 280 32 N 170 3.0 1.0
2012-02-26 05:00:00 20120226 500 FM-15 270 32 N 180 7.0 1.0
2012-02-26 00:00:00 20120226 0 FM-15 290 32 N 160 2.0 0.0
2012-02-26 01:00:00 20120226 100 FM-15 280 32 N 170 0.0 8.0
2012-02-26 06:00:00 20120226 600 FM-15 270 31 N 180 -1.0 7.0
2011-01-28 10:00:00 20110128 1000 FM-15 110 31 N 340 23.0 5.0
2015-02-21 07:00:00 20150221 700 FM-15 270 31 N 180 -1.0 1.0
2015-02-21 08:00:00 20150221 800 FM-15 270 30 N 180 -1.0 4.0
Out[15]:
<matplotlib.axes._subplots.AxesSubplot at 0xc65f9b0>
In [16]:
# A spurious spike jumps up and back down, so BOTH the forward and backward
# differences exceed the threshold; keep rows where at least one is small.
incre_threshold = 20 if knot_unit else 10
print('sudden increase number', len(df.query('(incre > @incre_threshold )&(incre_reverse > @incre_threshold )')))
df = df.query('(incre < @incre_threshold )|(incre_reverse < @incre_threshold )')

# Check the max speed
# Explicit `columns=` keyword: the bare positional axis argument to
# DataFrame.drop is deprecated in modern pandas.
df.drop(columns=['incre', 'incre_reverse'], inplace=True)

sudden increase number 1

date HrMn type dir speed wind_type dir_windrose incre incre_reverse
time
2016-03-09 10:00:00 20160309 1000 FM-15 60 36 N 30 13.0 13.0
2012-04-14 00:00:00 20120414 0 FM-15 180 34 N 270 8.0 26.0
2012-02-26 05:00:00 20120226 500 FM-15 270 32 N 180 7.0 1.0
2015-02-21 06:00:00 20150221 600 FM-15 280 32 N 170 3.0 1.0
2012-02-26 01:00:00 20120226 100 FM-15 280 32 N 170 0.0 8.0
2012-02-26 00:00:00 20120226 0 FM-15 290 32 N 160 2.0 0.0
2015-02-21 07:00:00 20150221 700 FM-15 270 31 N 180 -1.0 1.0
2011-01-28 10:00:00 20110128 1000 FM-15 110 31 N 340 23.0 5.0
2012-02-26 06:00:00 20120226 600 FM-15 270 31 N 180 -1.0 7.0
2012-02-25 23:00:00 20120225 2300 FM-15 290 30 N 160 2.0 -2.0

1.4.2 Direction re-alignment¶

For some datasets, the 16 sectors are not recorded properly,

e.g. the sectors are [0, 20, 50, ...]; the angles need to be redistributed onto a regular 22.5° grid, e.g. [0, 22.5, 45, ...]

In [17]:
# Infer the angular sector width from the number of direction values actually
# in use (> 30 occurrences, 999 excluded): 36 columns -> 10 deg, 16 -> 22.5 deg.
display(df['dir'].value_counts().sort_index())
effective_column = df.query('dir < 999')['dir'].value_counts()[df['dir'].value_counts() > 30].sort_index()
if integer_data:
    SECTOR_LENGTH = 360/len(effective_column)
else:
    # Non-integer (continuous) data: assume the standard 10-degree sectors.
    SECTOR_LENGTH = 10
print(len(effective_column), SECTOR_LENGTH)

0      1535
10     1354
20     1263
30     1208
40     1043
50      984
60      864
70      795
80      939
90     1073
100    1208
110    1596
120    2068
130    1973
140    2554
150    2690
160    2782
170    1791
180    1222
190     859
200     765
210     819
220    1001
230     995
240    1412
250    1376
260    1482
270    2078
280    2232
290    2133
300    1825
310    1310
320    1100
330    1115
340    1282
350    1394
999    3156
Name: dir, dtype: int64
36 10.0

In [18]:
# Snap irregular sector centres (e.g. [0, 20, 50, ...]) onto a regular grid
# (project helper).
df=realign_direction(df, effective_column)


1.4.3 0 Speed¶

In [19]:
# Drop calm (zero-speed) records only if they are suspiciously frequent in the
# recent data (project helper decides the threshold).
with_too_many_zero, null_wind_frequency = is_with_too_many_zero(df['2005':])
delete_zero = with_too_many_zero
if delete_zero:
    df = df.query('(speed > 0)')
print(delete_zero, null_wind_frequency)

False 0.00341920544178

In [20]:
# dir == 999 marks "variable direction"; show the speeds of those records,
# then redistribute them across sectors (project helper).
print(df.query('dir == 999')['speed'].value_counts())
df=fill_direction_999(df, SECTOR_LENGTH)

2     1403
1      533
3      519
4      257
0      189
5      152
6       65
7       26
8        7
10       3
11       1
9        1
Name: speed, dtype: int64


1.5 Time Shift Comparison¶

In [21]:
# Direction histogram bin edges. 'even' centres 10-degree bins on the recorded
# values (e.g. direction 0 falls in the bin [-5, 5)).
DIR_REDISTRIBUTE = 'even'
if DIR_REDISTRIBUTE == 'even':
    DIR_BIN = arange(-5, 360, 10)
elif DIR_REDISTRIBUTE == 'round_up':
    DIR_BIN = arange(0, 360+10, 10)

# Comparison between mid_year, looking for:
# 1. Odd Even Bias
# 2. Time Shift of Wind Speed Distribution
bins = arange(0, df.speed.max() + 1)
df[:str(MID_YEAR)]['speed'].plot(
    kind='hist', alpha=0.5,bins=bins, label='< %s' % MID_YEAR)

df[str(MID_YEAR+1):]['speed'].plot(
    kind='hist', alpha=0.5,bins=bins, label='> %s' % MID_YEAR)

plt.suptitle('Speed Comparison between year < %s, > %s ' % (MID_YEAR, MID_YEAR), fontsize = 14)
plt_configure(xlabel='Speed', ylabel='Frequency', legend=True, figsize=(8, 3))

In [22]:
# Same first-half vs second-half comparison as above, but for direction.
df[:str(MID_YEAR)]['dir'].plot(
    kind='hist', alpha=0.5,bins=DIR_BIN, label='< %s' % MID_YEAR)

df[str(MID_YEAR+1):]['dir'].plot(
    kind='hist', alpha=0.5,bins=DIR_BIN, label='> %s' % MID_YEAR)

plt.suptitle('Dir Comparison between year < %s, and > %s ' % (MID_YEAR, MID_YEAR), fontsize = 14)
plt_configure(xlabel='Dir', ylabel='Frequency', legend={'loc':'best'}, figsize=(8, 3), tight='x')

In [23]:
# Show, then drop, the records whose direction could not be filled (NaN).
display(df[df['dir'].isnull()])
df.dropna(subset=['dir'], inplace=True)

date HrMn type dir speed wind_type dir_windrose
time
2010-05-18 05:00:00 20100518 500 FM-15 NaN 2 V 999
2010-05-18 06:00:00 20100518 600 FM-15 NaN 2 V 999
In [24]:
# Inspect the time shift of speed and degree distribution, and odd-even bias
# (project helper; plots per-period histograms side by side).
check_time_shift(df, speed_unit_text=speed_unit_text, dir_unit_text=dir_unit_text)

2011 - 2015

D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
warnings.warn(self.msg_depr % (key, alt_key))

In [25]:
# Annual ('A') and monthly ('M') mean speed — a quick visual trend check.
df.resample('A').mean().plot(y='speed')
plt.gca().set_ylim(bottom=0)
df.resample('M').mean().plot(y='speed', figsize=(20,4))
plt.gca().set_ylim(bottom=0)

Out[25]:
(0, 10.0)
In [26]:
%%time
# Overlay each year's speed/direction histogram on the full-record histogram
# to spot drifting years (only years with > 1000 records are drawn).
# NOTE(review): indentation was lost in the export; the loop nesting and the
# placement of align_figures() are reconstructed — confirm against the
# original notebook.
for column in ['speed', 'dir']:
    if column == 'speed':
        bins = arange(0, df[column].max()+1, 1)
    else:
        bins = arange(0, 361, 10)
    den, _ = np.histogram(df[column], bins=bins, density=True)
    # Shared y-limit so yearly panels are visually comparable.
    y_top=max(den)*1.2
    for year in arange(1980, 2016):
        end_year = year
        sub_df = df[str(year):str(end_year)]
        if len(sub_df) > 1000:
            plt.figure()
            df[column].hist(bins=bins, alpha=0.3, normed=True)
            sub_df[column].hist(bins=bins, alpha=0.5, figsize=(3,1.5), normed=True)
            plt.gca().set_ylim(top=y_top)
            plt_configure(title=str(year))
    align_figures()

Wall time: 4.24 s

In [27]:
# Rolling 3-year R^2 of the local histogram against the full-record histogram
# for speed and direction — low values flag unstationary periods.
# NOTE(review): indentation was lost in the export; nesting is reconstructed —
# confirm against the original notebook.
for column in ['speed', 'dir']:
    if column == 'speed':
        bins = arange(0, df[column].max()+1, 1)
    else:
        bins = arange(0, 361, 10)
    density_all, _ = np.histogram(df[column], bins=bins, density=True)
    df[column].hist(bins=bins, figsize=(5,3))

    R_squares = []
    years = []
    for year in arange(1980, 2016):
        # 3-year window centred on `year`.
        start_year, end_year = year-1, year+1
        sub_df = df[str(start_year):str(end_year)]
        if len(sub_df) > 1000:
            density, _ = np.histogram(sub_df[column], bins=bins, density=True)
            y_mean = np.mean(density_all)
            SS_tot = np.sum(np.power(density_all - y_mean, 2))
            SS_res = np.sum(np.power(density_all - density, 2))

            R_square = 1 - SS_res / SS_tot
            R_squares.append(R_square)
            years.append(year)

    plt.figure()
    plot(years, R_squares)
    # Clamp the lower y-limit into [0, 0.85] so the panels stay comparable.
    ylim = max(min(plt.gca().get_ylim()[0],0.85),0)
    plt.gca().set_ylim(bottom=ylim, top=1)
    plt_configure(figsize=(5,3))
    align_figures()


1.6 Re-distribute Direction and Speed (Optional)¶

e.g. Dir 50 -> -45 ~ 55, to make KDE result better

In [28]:
# Spread integer directions uniformly across their sector so the KDE does not
# see artificial spikes at the recorded values (project helper).
if integer_data:
    df = randomize_angle(df, DIR_REDISTRIBUTE, SECTOR_LENGTH)

In [29]:
# Spread integer speeds across their unit bin: downward if zero-speed records
# were deleted, upward otherwise (e.g. 0 -> [0, 1]).
if integer_data:
    if delete_zero:
        redistribute_method = 'down'
    else:
        redistribute_method = 'up'

    df, speed_redistribution_info = randomize_speed(df, redistribute_method)

Redistribute upward, e.g. 0 -> [0,1]


1.7 Generate (x,y) from (speed,dir)¶

In [30]:
# Cook orientation
# df['dir']= (df['dir'] + 180)%360

In [31]:
# There might be a small dot in the centre, which is due to too many zero (more than 1 speed) in center
# Scatter plot in matplot has performance issue, the speed is very slow
# Convert polar (speed, dir in degrees) into Cartesian (x, y) for the GMM fit.
df['x'] = df['speed'] * cos(df['dir'] * pi / 180.0)
df['y'] = df['speed'] * sin(df['dir'] * pi / 180.0)


2. Re-select Data and Overview¶

2.1 Data Overview¶

In [32]:
## Summary of the data selection
print('Knot unit?', knot_unit)
print('Report type used:', report_type_most_used)
print('Sampling time used:', sample_times)
if 'speed_redistribution_info' in globals():
    print('Speed redistribution info:', speed_redistribution_info )

# Keep the full cleaned record, then fit on the 2011-2015 window only.
df_all_years = df # for later across-year comparison
df = df_all_years['2011':'2015']
# df = df.query('(HrMn == 0) and (speed >= 0.5) and (date%10000 > 900) and (date%10000 < 1000)' )
df.describe()

Knot unit? True
Report type used: FM-15
Sampling time used: [0]
Speed redistribution info: Redistribute upward, e.g. 0 -> [0,1]

Out[32]:
date HrMn dir speed dir_windrose x y
count 4.206600e+04 42066.000000 42066.000000 42066.000000 42066.000000 42066.000000 42066.000000
mean 2.013044e+07 1153.544430 179.874592 8.447488 237.262920 -1.553747 0.376113
std 1.405171e+04 691.989734 98.737423 4.375064 208.025899 6.507981 6.752294
min 2.011010e+07 0.000000 -4.978698 0.005481 0.000000 -34.524102 -32.472160
25% 2.012033e+07 600.000000 110.516651 5.156952 110.000000 -6.005882 -4.253203
50% 2.013062e+07 1200.000000 167.650918 7.482294 200.000000 -0.962347 0.459120
75% 2.014092e+07 1800.000000 269.171783 11.220914 300.000000 3.482725 5.152950
max 2.015123e+07 2300.000000 354.999753 34.535472 999.000000 27.009530 29.545306
In [33]:
# Speed time series for the selected 2011-2015 window.
df.plot(y='speed',legend=True,figsize=(20,5))

Out[33]:
<matplotlib.axes._subplots.AxesSubplot at 0xc3aed30>
In [34]:
# Accumulation by month: record count per month reveals coverage gaps.
df.resample('M').count().plot(y='date', kind='bar',figsize=(20,4))

Out[34]:
<matplotlib.axes._subplots.AxesSubplot at 0xc6c35f8>
In [35]:
# 90 degree is east
# Wind rose of the selected data, drawn in windrose convention (0 = N,
# clockwise) using the dir_windrose column.
ax = WindroseAxes.from_ax()
viridis = plt.get_cmap('viridis')
ax.bar(df.dir_windrose, df.speed, normed=True, opening=0.8, edgecolor='white', nsector=36, cmap=viridis)
ax.set_legend()

D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\cbook.py:136: MatplotlibDeprecationWarning: The axisbg attribute was deprecated in version 2.0. Use facecolor instead.
warnings.warn(message, mplDeprecation, stacklevel=1)