# importing necessary libraries
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
Numpy Pandas Basics
Lists
# Creating lists
list_a = [1, 2, 3, 4, 5]
list_b = [6, 7, 8, 9, 10]
# Operations on lists
# Adding lists
list_sum = [a + b for a, b in zip(list_a, list_b)]
print("List Sum:", list_sum)
# Vector product using lists
vector_product = [a * b for a, b in zip(list_a, list_b)]
print("Vector Product:", vector_product)
List Sum: [7, 9, 11, 13, 15]
Vector Product: [6, 14, 24, 36, 50]
Numpy Array
# Creating numpy arrays
numpy_array_a = np.array(list_a)
numpy_array_b = np.array(list_b)
# Operations on numpy arrays
# Adding numpy arrays
numpy_sum = numpy_array_a + numpy_array_b
print("Numpy Sum:", numpy_sum)
# Vector product using numpy arrays
numpy_vector_product = np.multiply(numpy_array_a, numpy_array_b)
print("Numpy Vector Product:", numpy_vector_product)
Numpy Sum: [ 7 9 11 13 15]
Numpy Vector Product: [ 6 14 24 36 50]
np.allclose(list_sum, numpy_sum), np.allclose(vector_product, numpy_vector_product)
(True, True)
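A quick aside on why the comprehensions are needed at all: `+` on Python lists concatenates rather than adding element-wise, while `+` on numpy arrays is element-wise by definition. A minimal sketch:

# Lists: + concatenates; numpy arrays: + adds element-wise
list_a + list_b                 # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
numpy_array_a + numpy_array_b   # array([ 7,  9, 11, 13, 15])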
Time comparison between lists and numpy arrays
# Creating large arrays and lists for time comparison
numpy_array_a = np.random.randint(0, 100, size=10000)
numpy_array_b = np.random.randint(0, 100, size=10000)

list_a = list(numpy_array_a)
list_b = list(numpy_array_b)
# Time for list addition
start_time = time.time()
for _ in range(1000):
    list_sum = [a + b for a, b in zip(list_a, list_b)]
end_time = time.time()
print("Time taken for list addition:", end_time - start_time)
# Time for numpy addition
start_time = time.time()
for _ in range(1000):
    numpy_sum = numpy_array_a + numpy_array_b
end_time = time.time()
print("Time taken for numpy addition:", end_time - start_time)
Time taken for list addition: 0.5500102043151855
Time taken for numpy addition: 0.0038487911224365234
# Time for list vector product
start_time = time.time()
for _ in range(10000):
    list_product = [a * b for a, b in zip(list_a, list_b)]
end_time = time.time()
print("Time taken for list vector product:", end_time - start_time)
# Time for numpy vector product
start_time = time.time()
for _ in range(10000):
    numpy_product = np.multiply(numpy_array_a, numpy_array_b)
end_time = time.time()
print("Time taken for numpy vector product:", end_time - start_time)
Time taken for list vector product: 5.371699571609497
Time taken for numpy vector product: 0.047417640686035156
np.allclose(list_sum, numpy_sum), np.allclose(list_product, numpy_product)
(True, True)
timeit_add_list = %timeit -o [a + b for a, b in zip(list_a, list_b)]
542 µs ± 593 ns per loop (mean ± std. dev. of 7 runs, 1,000 loops each)
timeit_add_numpy = %timeit -o numpy_array_a + numpy_array_b
3.5 µs ± 6.1 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)
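As a side note, for wall-clock benchmarks like the ones above, `time.perf_counter()` is generally preferable to `time.time()`: it is monotonic and uses the highest-resolution clock available. A sketch of the same numpy benchmark with it:

# perf_counter() is monotonic and higher-resolution than time()
start = time.perf_counter()
for _ in range(1000):
    numpy_sum = numpy_array_a + numpy_array_b
print("Elapsed:", time.perf_counter() - start)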
Code clarity
# Numpy code is often more concise and readable than list comprehensions
# Example: Calculate the element-wise product of two lists
list_product = [a * b for a, b in zip(list_a, list_b)]
numpy_product = np.multiply(numpy_array_a, numpy_array_b)
numpy_product
array([5950, 1995, 264, ..., 2436, 928, 665])
numpy_array_a @ numpy_array_b
24470992
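For 1-D arrays, the `@` operator computes the dot (inner) product. A few equivalent spellings, shown purely for comparison:

# All of these compute the same inner product for 1-D arrays
np.dot(numpy_array_a, numpy_array_b)
np.inner(numpy_array_a, numpy_array_b)
(numpy_array_a * numpy_array_b).sum()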
Reading CSV file using Numpy
!head ../datasets/tennis-discrete-output.csv
Day,Outlook,Temp,Humidity,Windy,Play
D1,Sunny,Hot,High,Weak,No
D2,Sunny,Hot,High,Strong,No
D3,Overcast,Hot,High,Weak,Yes
D4,Rain,Mild,High,Weak,Yes
D5,Rain,Cool,Normal,Weak,Yes
D6,Rain,Cool,Normal,Strong,No
D7,Overcast,Cool,Normal,Strong,Yes
D8,Sunny,Mild,High,Weak,No
D9,Sunny,Cool,Normal,Weak,Yes
np.genfromtxt?
Signature:
np.genfromtxt(
    fname, dtype=<class 'float'>, comments='#', delimiter=None,
    skip_header=0, skip_footer=0, converters=None, missing_values=None,
    filling_values=None, usecols=None, names=None, excludelist=None,
    deletechars=" !#$%&'()*+,-./:;<=>?@[\\]^{|}~", replace_space='_',
    autostrip=False, case_sensitive=True, defaultfmt='f%i', unpack=None,
    usemask=False, loose=True, invalid_raise=True, max_rows=None,
    encoding='bytes', *, ndmin=0, like=None,
)
Docstring:
Load data from a text file, with missing values handled as specified.

Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.

Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
    File, filename, list, or generator to read.
dtype : dtype, optional
    Data type of the resulting array. If None, the dtypes will be
    determined by the contents of each column, individually.
delimiter : str, int, or sequence, optional
    The string used to separate values. By default, any consecutive
    whitespaces act as delimiter.
skip_header : int, optional
    The number of lines to skip at the beginning of the file.
names : {None, True, str, sequence}, optional
    If `names` is True, the field names are read from the first line
    after the first `skip_header` lines.
...
File:      ~/miniforge3/lib/python3.9/site-packages/numpy/lib/npyio.py
Type:      function
data = np.genfromtxt('../datasets/tennis-discrete-output.csv', delimiter=',')
data
array([[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan],
[nan, nan, nan, nan, nan, nan]])
Wait! What happened? By default, `genfromtxt` tries to parse every field as a float; since every value in this file is a string, each one becomes `nan`. Passing `dtype=str` keeps the raw strings:
data = np.genfromtxt('../datasets/tennis-discrete-output.csv', delimiter=',', dtype=str)
data
array([['Day', 'Outlook', 'Temp', 'Humidity', 'Windy', 'Play'],
['D1', 'Sunny', 'Hot', 'High', 'Weak', 'No'],
['D2', 'Sunny', 'Hot', 'High', 'Strong', 'No'],
['D3', 'Overcast', 'Hot', 'High', 'Weak', 'Yes'],
['D4', 'Rain', 'Mild', 'High', 'Weak', 'Yes'],
['D5', 'Rain', 'Cool', 'Normal', 'Weak', 'Yes'],
['D6', 'Rain', 'Cool', 'Normal', 'Strong', 'No'],
['D7', 'Overcast', 'Cool', 'Normal', 'Strong', 'Yes'],
['D8', 'Sunny', 'Mild', 'High', 'Weak', 'No'],
['D9', 'Sunny', 'Cool', 'Normal', 'Weak', 'Yes'],
['D10', 'Rain', 'Mild', 'Normal', 'Weak', 'Yes'],
['D11', 'Sunny', 'Mild', 'Normal', 'Strong', 'Yes'],
['D12', 'Overcast', 'Mild', 'High', 'Strong', 'Yes'],
['D13', 'Overcast', 'Hot', 'Normal', 'Weak', 'Yes'],
['D14', 'Rain', 'Mild', 'High', 'Strong', 'No']], dtype='<U8')
data.shape
(15, 6)
Question: Find the outlook on D11
idx = np.argwhere(data[:, 0] == 'D11')[0, 0]
idx
11
data[idx]
array(['D11', 'Sunny', 'Mild', 'Normal', 'Strong', 'Yes'], dtype='<U8')
data[idx][1]
'Sunny'
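An alternative worth knowing (a sketch, not used in the rest of this notebook): passing `names=True` makes `genfromtxt` read the header row as field names and return a structured array, so columns can be indexed by name instead of position.

# dtype=None infers a type per column; encoding=None avoids byte strings
named = np.genfromtxt('../datasets/tennis-discrete-output.csv',
                      delimiter=',', dtype=None, names=True, encoding=None)
named['Outlook'][named['Day'] == 'D11']   # expected: array(['Sunny'], ...)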
Reading CSV file using Pandas
df = pd.read_csv('../datasets/tennis-discrete-output.csv')
df
 | Day | Outlook | Temp | Humidity | Windy | Play |
---|---|---|---|---|---|---|
0 | D1 | Sunny | Hot | High | Weak | No |
1 | D2 | Sunny | Hot | High | Strong | No |
2 | D3 | Overcast | Hot | High | Weak | Yes |
3 | D4 | Rain | Mild | High | Weak | Yes |
4 | D5 | Rain | Cool | Normal | Weak | Yes |
5 | D6 | Rain | Cool | Normal | Strong | No |
6 | D7 | Overcast | Cool | Normal | Strong | Yes |
7 | D8 | Sunny | Mild | High | Weak | No |
8 | D9 | Sunny | Cool | Normal | Weak | Yes |
9 | D10 | Rain | Mild | Normal | Weak | Yes |
10 | D11 | Sunny | Mild | Normal | Strong | Yes |
11 | D12 | Overcast | Mild | High | Strong | Yes |
12 | D13 | Overcast | Hot | Normal | Weak | Yes |
13 | D14 | Rain | Mild | High | Strong | No |
df['Day'] == 'D11'
0 False
1 False
2 False
3 False
4 False
5 False
6 False
7 False
8 False
9 False
10 True
11 False
12 False
13 False
Name: Day, dtype: bool
df[df['Day'] == 'D11']
 | Day | Outlook | Temp | Humidity | Windy | Play |
---|---|---|---|---|---|---|
10 | D11 | Sunny | Mild | Normal | Strong | Yes |
df[df['Day'] == 'D11']['Outlook']
10 Sunny
Name: Outlook, dtype: object
'Day == "D11"')['Outlook'] df.query(
10 Sunny
Name: Outlook, dtype: object
df.shape
(14, 6)
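Yet another idiom for the same lookup (a sketch, equivalent to the boolean mask and `query` above): set `Day` as the index and use label-based access with `.loc`.

# Label-based lookup after making Day the index
df.set_index('Day').loc['D11', 'Outlook']   # expected: 'Sunny'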
Question: How many times do we play vs. not play tennis?
ser = df['Play']
ser
0 No
1 No
2 Yes
3 Yes
4 Yes
5 No
6 Yes
7 No
8 Yes
9 Yes
10 Yes
11 Yes
12 Yes
13 No
Name: Play, dtype: object
unique_play_options = df['Play'].unique()
unique_play_options
array(['No', 'Yes'], dtype=object)
for option in unique_play_options:
    print(option, (df['Play'] == option).sum())
No 5
Yes 9
df['Play'].value_counts()
Play
Yes 9
No 5
Name: count, dtype: int64
df.groupby('Play').size()
Play
No 5
Yes 9
dtype: int64
gby = df.groupby('Play')
{k: len(v) for k, v in gby.groups.items()}
{'No': 5, 'Yes': 9}
pd.crosstab(index=df['Play'], columns='count')
col_0 | count |
---|---|
Play | |
No | 5 |
Yes | 9 |
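For completeness, the same counts can be obtained with the standard library alone; a minimal sketch using `collections.Counter`:

from collections import Counter

Counter(df['Play'])   # expected: Counter({'Yes': 9, 'No': 5})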
What is the distribution of any given attribute?
def distribution(df, attribute):
    return df[attribute].value_counts()

ser = distribution(df, 'Outlook')
ser
Outlook
Sunny 5
Rain 5
Overcast 4
Name: count, dtype: int64
type(ser)
pandas.core.series.Series
ser.values
array([5, 5, 4])
ser.index
Index(['Sunny', 'Rain', 'Overcast'], dtype='object', name='Outlook')
distribution(df, 'Temp')
Temp
Mild 6
Hot 4
Cool 4
Name: count, dtype: int64
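Since matplotlib is already imported at the top, a distribution can also be visualised directly from the Series; a quick sketch using the `ser` computed above:

# Bar chart of the Outlook value counts
ser.plot(kind='bar')
plt.ylabel('count')
plt.show()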
Finding the entropy of the target variable
target_attribute = 'Play'
dist_target = distribution(df, target_attribute)
dist_target
Play
Yes 9
No 5
Name: count, dtype: int64
Normalize distribution
dist_target / dist_target.sum()
Play
Yes 0.642857
No 0.357143
Name: count, dtype: float64
df['Play'].value_counts(normalize=True)
Play
Yes 0.642857
No 0.357143
Name: proportion, dtype: float64
normalized_dist_target = dist_target / dist_target.sum()
For-loop way of calculating entropy
e = 0.0
for value, p in normalized_dist_target.items():
    e = e - p * np.log2(p + 1e-6)  # 1e-6 is added to avoid log(0)
print(e)
0.9402830732836911
normalized_dist_target.apply(lambda x: -x * np.log2(x + 1e-6))
Play
Yes 0.409775
No 0.530508
Name: count, dtype: float64
normalized_dist_target.apply(lambda x: -x * np.log2(x + 1e-6)).sum()
0.9402830732836911
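The same computation can be packaged as a reusable helper. The sketch below uses `scipy.stats.entropy` (an assumption: SciPy is available in this environment, it is not imported above):

from scipy.stats import entropy   # assumption: SciPy is installed

def series_entropy(series):
    # Shannon entropy (base 2) of a categorical Series
    p = series.value_counts(normalize=True)
    return entropy(p, base=2)

series_entropy(df['Play'])   # expected: ~0.9403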
More on crosstab
pd.crosstab(index=df['Outlook'], columns=df['Play'])
Play | No | Yes |
---|---|---|
Outlook | ||
Overcast | 0 | 4 |
Rain | 2 | 3 |
Sunny | 3 | 2 |
pd.crosstab(index=df['Outlook'], columns=df['Play']).T
Outlook | Overcast | Rain | Sunny |
---|---|---|---|
Play | |||
No | 0 | 2 | 3 |
Yes | 4 | 3 | 2 |
df_attr = pd.crosstab(index=df['Play'], columns=df['Outlook'], normalize='columns')
df_attr
Outlook | Overcast | Rain | Sunny |
---|---|---|---|
Play | |||
No | 0.0 | 0.4 | 0.6 |
Yes | 1.0 | 0.6 | 0.4 |
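With `normalize='columns'`, each column of `df_attr` is the conditional distribution P(Play | Outlook); a quick sanity check:

df_attr.sum(axis=0)   # each column should sum to 1.0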
Using groupby
df.groupby(['Play', 'Outlook']).size()
Play Outlook
No Rain 2
Sunny 3
Yes Overcast 4
Rain 3
Sunny 2
dtype: int64
df.groupby(['Play', 'Outlook']).size().index
MultiIndex([( 'No', 'Rain'),
( 'No', 'Sunny'),
('Yes', 'Overcast'),
('Yes', 'Rain'),
('Yes', 'Sunny')],
names=['Play', 'Outlook'])
df.groupby(['Play', 'Outlook']).size().unstack('Outlook')
Outlook | Overcast | Rain | Sunny |
---|---|---|---|
Play | |||
No | NaN | 2.0 | 3.0 |
Yes | 4.0 | 3.0 | 2.0 |
df_attr_groupby = df.groupby(['Play', 'Outlook']).size().unstack('Outlook').fillna(0)
df_attr_groupby
Outlook | Overcast | Rain | Sunny |
---|---|---|---|
Play | |||
No | 0.0 | 2.0 | 3.0 |
Yes | 4.0 | 3.0 | 2.0 |
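Normalising this groupby table column-wise reproduces the `df_attr` crosstab from above; a sketch:

# Divide each column by its total to get P(Play | Outlook)
df_attr_groupby / df_attr_groupby.sum(axis=0)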
Apply
neg_plogp = df_attr.apply(lambda x: -x * np.log2(x + 1e-6), axis=0)
neg_plogp
Outlook | Overcast | Rain | Sunny |
---|---|---|---|
Play | |||
No | 0.000000 | 0.528770 | 0.442178 |
Yes | -0.000001 | 0.442178 | 0.528770 |
neg_plogp.sum(axis=0).sort_index()
Outlook
Overcast -0.000001
Rain 0.970948
Sunny 0.970948
dtype: float64
df_attr_dist = distribution(df, 'Outlook')
norm_attr_dist = df_attr_dist / df_attr_dist.sum()
norm_attr_dist
Outlook
Sunny 0.357143
Rain 0.357143
Overcast 0.285714
Name: count, dtype: float64
(norm_attr_dist * neg_plogp.sum(axis=0).sort_index()).sum()
0.6935336657070463
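Putting the pieces together: this weighted conditional entropy H(Play | Outlook) can be subtracted from the target entropy computed earlier to give the information gain of Outlook, the quantity a decision tree would use to pick its split. A sketch using the values already computed above:

# Information gain = H(Play) - H(Play | Outlook)
weighted_entropy = (norm_attr_dist * neg_plogp.sum(axis=0).sort_index()).sum()
information_gain = e - weighted_entropy   # e is H(Play) from the loop above
information_gain   # expected: ~0.247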