Skip to content

Commit

Permalink
Merge pull request #19 from shivendrra/dev
Browse files Browse the repository at this point in the history
made new updates for pypi deployment
  • Loading branch information
shivendrra committed Sep 6, 2024
2 parents 59320e2 + 96af03d commit 8c7c707
Show file tree
Hide file tree
Showing 125 changed files with 1,750 additions and 8,039 deletions.
2 changes: 0 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,6 @@
__pycache__/
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
Expand Down
31 changes: 21 additions & 10 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,14 @@ git clone https://github.com/shivendrra/axon.git
cd axon
```

or

Install via pip:

```bash
pip install axon-pypi
```

## Usage

You can use this similar to micrograd to build a simple neural network or do scalar level backprop.
Expand All @@ -33,11 +41,12 @@ You can use this similar to micrograd to build a simple neural network or do sca
#### Axon.array

```python
from axon.base import array
import axon
from axon import array

# Create two 2D arrays
a = array([[1, 2], [3, 4]], dtype='int32')
b = array([[5, 6], [7, 8]], dtype='int32')
a = array([[1, 2], [3, 4]], dtype=axon.int32)
b = array([[5, 6], [7, 8]], dtype=axon.int32)

# Addition
c = a + b
Expand Down Expand Up @@ -65,16 +74,17 @@ Matrix Multiplication:

anyway, prefer documentation for detailed usage guide:

1. [axon.doc](https://github.com/shivendrra/axon/blob/main/docs/axon.md): for development purpose
2. [usage.doc](https://github.com/shivendrra/axon/blob/main/docs/usage.md): for using it like numpy
1. [axon.md](https://github.com/shivendrra/axon/blob/main/docs/axon.md): for development purpose
2. [usage.md](https://github.com/shivendrra/axon/blob/main/docs/usage.md): for using it like numpy
3. [axon_micro.md](https://github.com/shivendrra/axon/blob/main/docs/axon_micro.md): for axon.micro i.e. scalar autograd engine

#### Axon.micro
```python

from axon.micro import value
from axon.micro import scalar

a = value(2)
b = value(3)
a = scalar(2)
b = scalar(3)

c = a + b
d = a * b
Expand Down Expand Up @@ -123,10 +133,11 @@ git push origin my-feature-branch

## Testing

To run the unit tests you will have to install PyTorch, which the tests use as a reference for verifying the correctness of the calculated gradients. Then simply:
To run the unit tests you will have to install PyTorch & NumPy, which the tests use as references for verifying the correctness of the calculated gradients & values. Then simply run whichever file you prefer:

```shell
python -m pytest
python tests/test_array.py # for testing the axon functions with numpy
python tests/test_micro.py # for testing the axon.micro functions with pytorch
```

## Contributing
Expand Down
7 changes: 4 additions & 3 deletions axon/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from .base import array, int8, int16, int32, int64, float16, float32, float64, double, long
from .ops import *
from .utils import *
from ._ops import *
from ._utils import *
from ._random import Random

__all__ = ['array', 'int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64', 'double', 'long']
random = Random(seed=200)
14 changes: 13 additions & 1 deletion axon/ops.py → axon/_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -164,4 +164,16 @@ def det(data:Union[array, list]) -> array:

def swap_axes(data:Union[array, list], axis1:int, axis2:int) -> array:
  """Functional wrapper over ``array.swap_axes``.

  Coerces a plain nested list to ``array`` first, then swaps the two axes.
  (Fixed: a merge artifact had left a duplicated, dead ``return`` line here.)
  """
  data = data if isinstance(data, array) else array(data)
  return data.swap_axes(axis1, axis2)

def exp(data:Union[array, list]) -> array:
  """Element-wise exponential; accepts an ``array`` or a plain nested list."""
  if not isinstance(data, array):
    data = array(data)
  return data.exp()

def sum(data:Union[array, list], axis:Optional[int]=None, keepdims:bool=False) -> array:
  """Sum of elements along ``axis`` (all elements when ``axis`` is None)."""
  arr = data if isinstance(data, array) else array(data)
  return arr.sum(axis=axis, keepdims=keepdims)

def log(data:Union[array, list]) -> array:
  """Element-wise natural logarithm; accepts an ``array`` or a plain nested list."""
  if not isinstance(data, array):
    data = array(data)
  return data.log()
78 changes: 78 additions & 0 deletions axon/_random.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
import random
import math

class Random:
  """numpy.random-like generator built on the stdlib ``random`` module.

  NOTE(review): ``seed`` delegates to the module-global ``random`` state, so
  all ``Random`` instances share one underlying stream — confirm this is intended.
  """

  def __init__(self, seed=None):
    self.seed(seed)

  def seed(self, seed=None):
    # seeds the global stdlib generator (shared across instances)
    random.seed(seed)

  def randint(self, low, high=None, size=None):
    """
    Return random integers from `low` (inclusive) to `high` (exclusive).
    If high is None, then return integers from 0 to `low`.
    """
    if high is None:
      high = low
      low = 0
    # random.randint is inclusive on both ends, hence high - 1
    return self._generate_random(lambda: random.randint(low, high - 1), size)

  def rand(self, *size):
    """
    Generate random float numbers between 0 and 1. Works like np.random.rand.
    """
    return self._generate_random(random.random, size)

  def uniform(self, low=0.0, high=1.0, size=None):
    """
    Return random floats in the half-open interval [low, high).
    """
    return self._generate_random(lambda: random.uniform(low, high), size)

  def randn(self, *size):
    """
    Generate random numbers from a standard normal distribution using Box-Muller transform.
    """
    def box_muller():
      u1 = random.random()
      u2 = random.random()
      z0 = math.sqrt(-2.0 * math.log(u1)) * math.cos(2.0 * math.pi * u2)
      return z0
    return self._generate_random(box_muller, size)

  def choice(self, a, size=None, replace=True):
    """
    Generate a random sample from a given 1D array `a`.
    If replace is False, it generates without replacement.
    """
    if not replace:
      # BUGFIX: `size` may be None (the default); `None > len(a)` raised a
      # TypeError and random.sample(a, None) was invalid. Treat None as one draw.
      k = 1 if size is None else size
      if k > len(a):
        raise ValueError("Cannot take a larger sample than population when 'replace=False'")
      sampled = random.sample(a, k)
      return sampled[0] if size is None else sampled
    return self._generate_random(lambda: random.choice(a), size)

  def _generate_random(self, func, size):
    """
    Utility function to generate random numbers with or without shape.
    If `size` is None, a single random value is returned.
    """
    if size is None:
      return func()
    if isinstance(size, int):
      return [func() for _ in range(size)]
    elif isinstance(size, tuple):
      # BUGFIX: rand()/randn() called with no shape args pass an empty tuple,
      # which previously crashed _nested_list; return a scalar like numpy does.
      if not size:
        return func()
      return self._nested_list(func, size)
    else:
      raise ValueError(f"Invalid size: {size}")

  def _nested_list(self, func, shape):
    """
    Recursively create a nested list with the given shape.
    """
    if len(shape) == 1:
      return [func() for _ in range(shape[0])]
    else:
      return [self._nested_list(func, shape[1:]) for _ in range(shape[0])]
File renamed without changes.
86 changes: 74 additions & 12 deletions axon/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,16 +49,26 @@ def format_element(element):

formatted_data = format_element(self.data)

def truncate_list(data, max_items=8):
if len(data) > max_items:
return data[:max_items // 2] + ['...'] + data[-max_items // 2:]
return data

def format_data(data, level=0):
if isinstance(data[0], list):
inner = ",\n".join(["\t" * (level + 1) + format_data(sub_data, level + 1) for sub_data in data])
if len(data) > 8:
data = truncate_list(data) # Truncate rows if there are more than 8 arrays
inner = ",\n".join([" " * (level + 1) + format_data(sub_data, level + 1) for sub_data in data])
return f"[\n{inner}\n" + " " * level + "]"
return "[" + ", ".join(data) + "]"
else:
# Truncate individual row elements if they exceed 8
data = truncate_list(data)
return "[" + ", ".join(data) + "]"

formatted_str = format_data(formatted_data, 0)
formatted_str = formatted_str.replace("\t", " ")
return f"array({formatted_str}, dtype={self.dtype})\n"

def __getitem__(self, index:tuple):
if isinstance(index, tuple):
data = self.data
Expand Down Expand Up @@ -129,7 +139,7 @@ def flatten(self, start_dim:int=0, end_dim:int=-1) -> List["array"]:
def swap_axes(self, axis1:int, axis2:int) -> List["array"]:
  """Return a new array with ``axis1`` and ``axis2`` interchanged.

  Negative axis indices are normalized against ``self.ndim``.
  (Fixed: a merge artifact had left a duplicated ``return`` line, the kept
  one with a stray space inside the call.)
  """
  axis1 = self.ndim + axis1 if axis1 < 0 else axis1
  axis2 = self.ndim + axis2 if axis2 < 0 else axis2
  # `swap_axes` here resolves to the module-level helper, not this method
  return array(swap_axes(self.data, axis1, axis2), dtype=self.dtype)

def unsqueeze(self, dim:int=0):
dim = dim if dim > 0 else self.ndim + dim
Expand Down Expand Up @@ -172,6 +182,7 @@ def _add(a, b):
if self.size == other.size:
return array(_add(self.data, other.data), dtype=self.dtype)
else:
print(self.size, other.size)
raise ValueError("shapes are incompatible for operation")

def __mul__(self, other:List["array"]) -> List["array"]:
Expand All @@ -198,10 +209,42 @@ def __matmul__(self, other:List["array"]) -> List["array"]:
return array(out, dtype=self.dtype)

def __sub__(self, other:List["array"]) -> List["array"]:
  """Element-wise subtraction with broadcasting; returns ``self - other``."""
  other = other if isinstance(other, array) else array(other, dtype=self.dtype)
  def _sub(a, b):
    # recurse through nested lists, subtracting leaf elements
    if isinstance(a, list):
      return [_sub(_a, _b) for _a, _b in zip(a, b)]
    else:
      return a - b  # BUGFIX: was `a + b`, which made subtraction behave like addition

  target_shape, requires_broadcasting = broadcast_shape(self.shape, other.shape)

  if requires_broadcasting:
    # NOTE(review): broadcasting mutates self.data/other.data in place, matching
    # the existing __add__ implementation — confirm this side effect is intended
    self.data = handle_conversion(broadcast(self.data, target_shape), self.dtype)
    other.data = handle_conversion(broadcast(other.data, target_shape), other.dtype)

  if self.size == other.size:
    return array(_sub(self.data, other.data), dtype=self.dtype)
  else:
    raise ValueError("shapes are incompatible for operation")

def __rsub__(self, other:List["array"]) -> List["array"]:
  """Reflected subtraction with broadcasting; returns ``other - self``."""
  other = other if isinstance(other, array) else array(other, dtype=self.dtype)
  def _sub(a, b):
    # recurse through nested lists, subtracting leaf elements
    if isinstance(a, list):
      return [_sub(_a, _b) for _a, _b in zip(a, b)]
    else:
      return a - b  # BUGFIX: was `a + b`, which made subtraction behave like addition

  target_shape, requires_broadcasting = broadcast_shape(self.shape, other.shape)

  if requires_broadcasting:
    # NOTE(review): broadcasting mutates self.data/other.data in place, matching
    # the existing __add__ implementation — confirm this side effect is intended
    self.data = handle_conversion(broadcast(self.data, target_shape), self.dtype)
    other.data = handle_conversion(broadcast(other.data, target_shape), other.dtype)

  if self.size == other.size:
    # operand order flipped relative to __sub__: other - self
    return array(_sub(other.data, self.data), dtype=self.dtype)
  else:
    raise ValueError("shapes are incompatible for operation")

def __rmul__(self, other:List["array"]) -> List["array"]:
return other * self
Expand Down Expand Up @@ -230,6 +273,14 @@ def _pow(data, pow):
return math.pow(data, pow)

return array(_pow(self.data, pow), dtype=array.float32)

def exp(self) -> List["array"]:
  """Element-wise exponential (e**x); always returns a float32 array."""
  def walk(node):
    if isinstance(node, list):
      return [walk(item) for item in node]
    return math.exp(node)

  return array(walk(self.data), dtype=array.float32)

def relu(self) -> List["array"]:
def _apply(data):
Expand Down Expand Up @@ -327,7 +378,7 @@ def det(self) -> List["array"]:
out = determinant(self.data)
return array(out, dtype=self.dtype)

def mean(self, axis:Optional[int]=None, keepdims:bool=False) -> list[float]:
def mean(self, axis:Optional[int]=None, keepdims:bool=False) -> List["array"]:
if axis is None:
flat_array = flatten(self.data)
mean_val = sum(flat_array) / len(flat_array)
Expand All @@ -340,7 +391,7 @@ def mean(self, axis:Optional[int]=None, keepdims:bool=False) -> list[float]:
out = mean_axis(self.data, axis, keepdims)
return array(out, dtype=self.dtype)

def var(self, axis:Optional[int]=None, ddof:int=0, keepdims:bool=False) -> list[float]:
def var(self, axis:Optional[int]=None, ddof:int=0, keepdims:bool=False) -> List["array"]:
if axis is None:
flat_array = flatten(self.data)
mean_value = sum(flat_array) / len(flat_array)
Expand All @@ -355,7 +406,7 @@ def var(self, axis:Optional[int]=None, ddof:int=0, keepdims:bool=False) -> list[
out = var_axis(self.data, mean_values, axis, ddof, keepdims)
return array(out, dtype=self.dtype)

def std(self, axis:Optional[int]=None, ddof:int=0, keepdims:bool=False) -> list[float]:
def std(self, axis:Optional[int]=None, ddof:int=0, keepdims:bool=False) -> List["array"]:
variance = self.var(axis=axis, ddof=ddof, keepdims=keepdims).data
def _std(var):
if isinstance(var, list):
Expand All @@ -367,14 +418,25 @@ def _std(var):
out = _std(variance)
return array(out, dtype=self.dtype)

def sum(self, axis:Optional[int]=None, keepdims:bool=False) -> List["array"]:
  """Sum of elements over ``axis`` (all elements when ``axis`` is None).

  With ``axis=None`` and ``keepdims=True`` the scalar is wrapped as ``[[v]]``.
  (Fixed: a merge artifact left both the old ``if axis == 0:`` and the new
  ``elif axis == 0:`` lines plus a duplicated ``return``, which is invalid.)
  """
  if axis is None:  # idiom: `is None` instead of `== None`
    # `sum` here is the builtin, not this method
    if keepdims:
      out = [[sum(flatten(self.data))]]
    else:
      out = sum(flatten(self.data))
  elif axis == 0:
    out = sum_axis0(self.data)
  else:
    out = sum_axis(self.data, axis, keepdims)
  return array(out, dtype=self.dtype)

def log(self) -> List["array"]:
  """Element-wise natural logarithm; raises ValueError for values <= 0."""
  def walk(node):
    if isinstance(node, list):
      return [walk(item) for item in node]
    if node <= 0:
      raise ValueError(f"Logarithm undefined for non-positive values: {node}")
    return math.log(node)

  return array(walk(self.data), dtype=self.dtype)
Loading

0 comments on commit 8c7c707

Please sign in to comment.