diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..8db2976b
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,12 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: ".github/workflows"
+    schedule:
+      interval: "monthly"
+    groups:
+      actions:
+        patterns:
+          - "*"
+    labels:
+      - "infrastructure"
diff --git a/.github/workflows/ci_tests_run_notebooks.yml b/.github/workflows/ci_tests_run_notebooks.yml
index 31ddb5d8..7b54b916 100644
--- a/.github/workflows/ci_tests_run_notebooks.yml
+++ b/.github/workflows/ci_tests_run_notebooks.yml
@@ -35,15 +35,20 @@ jobs:
           name: with Python 3.11 and latest released version of dependencies
           os: ubuntu-latest

+        - python-version: '3.12'
+          toxenv: py312-test-predeps
+          name: with Python 3.12 and latest or pre-release version of dependencies
+          os: ubuntu-latest
+
         - python-version: '3.12'
           toxenv: py312-test-devdeps
           name: with Python 3.12 and developer versioned dependencies
           os: ubuntu-latest

     steps:
-    - uses: actions/checkout@v4
+    - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
     - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v5
+      uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
       with:
         python-version: ${{ matrix.python-version }}
diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml
index be39709f..642252b7 100644
--- a/.github/workflows/circleci.yml
+++ b/.github/workflows/circleci.yml
@@ -5,7 +5,7 @@ jobs:
     name: Run CircleCI artifacts redirector
     steps:
       - name: GitHub Action step
-        uses: larsoner/circleci-artifacts-redirector-action@master
+        uses: scientific-python/circleci-artifacts-redirector-action@839631420e45a08af893032e5a5e8843bf47e8ff # v1.2.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           api-token: ${{ secrets.CIRCLE_TOKEN }}
diff --git a/.github/workflows/conda.yml b/.github/workflows/conda.yml
index d94eaafb..18da4854 100644
--- a/.github/workflows/conda.yml
+++ b/.github/workflows/conda.yml
@@ -24,13 +24,12 @@ jobs:
       shell: bash -l {0}

     steps:
-      - uses: actions/checkout@v4
-      - uses: conda-incubator/setup-miniconda@v3
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0
         with:
           auto-update-conda: true
           activate-environment: numpy-tutorials
           environment-file: environment.yml
-          miniforge-variant: Mambaforge
           miniforge-version: latest
           use-mamba: true
           python-version: "3.11"
@@ -43,7 +42,7 @@
           conda list
           make -C site/ SPHINXOPTS="-nWT --keep-going" html

-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
           name: sphinx-build-artifact
           path: site/_build/html/reports
diff --git a/.gitignore b/.gitignore
index a1654131..bee3f8ec 100644
--- a/.gitignore
+++ b/.gitignore
@@ -99,4 +99,5 @@ content/tutorial-x-ray-image-processing/xray_image.gif
 content/video
 content/*ipynb
 content/tutorial-nlp-from-scratch/parameters.npy
-content/tutorial-nlp-from-scratch/*ipynb
\ No newline at end of file
+content/tutorial-nlp-from-scratch/*ipynb
+content/x_y-squared*
diff --git a/content/mooreslaw-tutorial.md b/content/mooreslaw-tutorial.md
index b9fefc8d..42c0de15 100644
--- a/content/mooreslaw-tutorial.md
+++ b/content/mooreslaw-tutorial.md
@@ -21,8 +21,7 @@ _The number of transistors reported per a given chip plotted on a log scale in t
 In 1965, engineer Gordon Moore
 [predicted](https://en.wikipedia.org/wiki/Moore%27s_law)
 that transistors on a chip would double every two years in the coming decade
-[[1](https://en.wikipedia.org/wiki/Moore%27s_law),
-[2](https://newsroom.intel.com/wp-content/uploads/sites/11/2018/05/moores-law-electronics.pdf)].
+[[1](https://en.wikipedia.org/wiki/Moore%27s_law)].
 You'll compare Moore's prediction against actual transistor counts in
 the 53 years following his prediction. You will determine the best-fit constants to describe the exponential
 growth of transistors on semiconductors compared to Moore's Law.
@@ -130,7 +129,7 @@ print("This is x{:.2f} more transistors than 1971".format(ML_1973 / ML_1971))

 Now, make a prediction based upon the historical data for
 semiconductors per chip. The [Transistor Count
-\[4\]](https://en.wikipedia.org/wiki/Transistor_count#Microprocessors)
+\[3\]](https://en.wikipedia.org/wiki/Transistor_count#Microprocessors)
 each year is in the `transistor_data.csv` file. Before loading a \*.csv
 file into a NumPy array, its a good idea to inspect the structure of the
 file first. Then, locate the columns of interest and save them to a
@@ -183,7 +182,7 @@ print("trans. cnt:\t", transistor_count[:10])

 You are creating a function that predicts the transistor count given a
 year. You have an _independent variable_, `year`, and a _dependent
-variable_, `transistor_count`. Transform the independent variable to
+variable_, `transistor_count`. Transform the dependent variable to
 log-scale,

 $y_i = \log($ `transistor_count[i]` $),$
@@ -520,7 +519,7 @@ double every two years from 1965 through 1975, but the average growth
 has maintained a consistent increase of $\times 1.98 \pm 0.01$ every two
 years from 1971 through 2019. In 2015, Moore revised his prediction to
 say Moore's law should hold until 2025.
-[[3](https://spectrum.ieee.org/computing/hardware/gordon-moore-the-man-whose-name-means-progress)].
+[[2](https://spectrum.ieee.org/computing/hardware/gordon-moore-the-man-whose-name-means-progress)].

 You can share these results as a zipped NumPy array file,
 `mooreslaw_regression.npz`, or as another csv,
 `mooreslaw_regression.csv`. The amazing progress in semiconductor
@@ -533,6 +532,5 @@ has been over the last half-century.
 ## References

 1. ["Moore's Law." Wikipedia article. Accessed Oct. 1, 2020.](https://en.wikipedia.org/wiki/Moore%27s_law)
-2. [Moore, Gordon E. (1965-04-19). "Cramming more components onto integrated circuits". intel.com. Electronics Magazine. Retrieved April 1, 2020.](https://newsroom.intel.com/wp-content/uploads/sites/11/2018/05/moores-law-electronics.pdf)
-3. [Courtland, Rachel. "Gordon Moore: The Man Whose Name Means Progress." IEEE Spectrum. 30 Mar. 2015.](https://spectrum.ieee.org/computing/hardware/gordon-moore-the-man-whose-name-means-progress).
-4. ["Transistor Count." Wikipedia article. Accessed Oct. 1, 2020.](https://en.wikipedia.org/wiki/Transistor_count#Microprocessors)
+2. [Courtland, Rachel. "Gordon Moore: The Man Whose Name Means Progress." IEEE Spectrum. 30 Mar. 2015.](https://spectrum.ieee.org/computing/hardware/gordon-moore-the-man-whose-name-means-progress).
+3. ["Transistor Count." Wikipedia article. Accessed Oct. 1, 2020.](https://en.wikipedia.org/wiki/Transistor_count#Microprocessors)
diff --git a/content/save-load-arrays.md b/content/save-load-arrays.md
index 6768d938..2620bfaf 100644
--- a/content/save-load-arrays.md
+++ b/content/save-load-arrays.md
@@ -187,18 +187,8 @@ np.savetxt("x_y-squared.csv", X=array_out, header="x, y", delimiter=",")

 Open the file, `x_y-squared.csv`, and you'll see the following:

-```
-# x, y
-0.000000000000000000e+00,0.000000000000000000e+00
-1.000000000000000000e+00,1.000000000000000000e+00
-2.000000000000000000e+00,4.000000000000000000e+00
-3.000000000000000000e+00,9.000000000000000000e+00
-4.000000000000000000e+00,1.600000000000000000e+01
-5.000000000000000000e+00,2.500000000000000000e+01
-6.000000000000000000e+00,3.600000000000000000e+01
-7.000000000000000000e+00,4.900000000000000000e+01
-8.000000000000000000e+00,6.400000000000000000e+01
-9.000000000000000000e+00,8.100000000000000000e+01
+```{code-cell}
+!head x_y-squared.csv
 ```

 ## Our arrays as a csv file
diff --git a/content/tutorial-air-quality-analysis.md b/content/tutorial-air-quality-analysis.md
index 14e46acf..fda0dbf3 100644
--- a/content/tutorial-air-quality-analysis.md
+++ b/content/tutorial-air-quality-analysis.md
@@ -97,7 +97,7 @@ With this, we have successfully imported the data and checked that it is complet

 ## Calculating the Air Quality Index

-We will calculate the AQI using [the method](https://app.cpcbccr.com/ccr_docs/FINAL-REPORT_AQI_.pdf) adopted by the [Central Pollution Control Board](https://www.cpcb.nic.in/national-air-quality-index) of India. To summarize the steps:
+We will calculate the AQI using [the method](https://app.cpcbccr.com/ccr_docs/FINAL-REPORT_AQI_.pdf) adopted by the [Central Pollution Control Board](https://www.cpcb.nic.in/national-air-quality-index/) of India. To summarize the steps:

 - Collect 24-hourly average concentration values for the standard pollutants; 8-hourly in case of CO and O3.
diff --git a/content/tutorial-deep-learning-on-mnist.md b/content/tutorial-deep-learning-on-mnist.md
index a41438c5..e1085b91 100644
--- a/content/tutorial-deep-learning-on-mnist.md
+++ b/content/tutorial-deep-learning-on-mnist.md
@@ -33,7 +33,7 @@ This tutorial was adapted from the work by [Andrew Trask](https://github.com/iam

 The reader should have some knowledge of Python, NumPy array manipulation, and linear algebra. In addition, you should be familiar with main concepts of [deep learning](https://en.wikipedia.org/wiki/Deep_learning).

-To refresh the memory, you can take the [Python](https://docs.python.org/dev/tutorial/index.html) and [Linear algebra on n-dimensional arrays](https://numpy.org/doc/stable/user/tutorial-svd.html) tutorials.
+To refresh the memory, you can take the [Python](https://docs.python.org/dev/tutorial/index.html) and [Linear algebra on n-dimensional arrays](https://numpy.org/numpy-tutorials/content/tutorial-svd.html) tutorials.

 You are advised to read the [Deep learning](http://www.cs.toronto.edu/~hinton/absps/NatureDeepReview.pdf) paper published in 2015 by Yann LeCun, Yoshua Bengio, and Geoffrey Hinton, who are regarded as some of the pioneers of the field. You should also consider reading Andrew Trask's [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning), which teaches deep learning with NumPy.
@@ -62,7 +62,7 @@ This tutorial can be run locally in an isolated environment, such as [Virtualenv

 ## 1. Load the MNIST dataset

-In this section, you will download the zipped MNIST dataset files originally stored in [Yann LeCun's website](http://yann.lecun.com/exdb/mnist/). Then, you will transform them into 4 files of NumPy array type using built-in Python modules. Finally, you will split the arrays into training and test sets.
+In this section, you will download the zipped MNIST dataset files originally developed by Yann LeCun's research team. (More details of the MNIST dataset are available on [Kaggle](https://www.kaggle.com/datasets/hojjatk/mnist-dataset).) Then, you will transform them into 4 files of NumPy array type using built-in Python modules. Finally, you will split the arrays into training and test sets.

 **1.** Define a variable to store the training/test image/label names of the MNIST dataset in a list:
@@ -99,7 +99,7 @@ import os
 data_dir = "../_data"
 os.makedirs(data_dir, exist_ok=True)

-base_url = "https://github.com/rossbar/numpy-tutorial-data-mirror/blob/main/"
+base_url = "https://ossci-datasets.s3.amazonaws.com/mnist/"

 for fname in data_sources.values():
     fpath = os.path.join(data_dir, fname)
diff --git a/content/tutorial-deep-reinforcement-learning-with-pong-from-pixels.md b/content/tutorial-deep-reinforcement-learning-with-pong-from-pixels.md
index 69607388..1598e572 100644
--- a/content/tutorial-deep-reinforcement-learning-with-pong-from-pixels.md
+++ b/content/tutorial-deep-reinforcement-learning-with-pong-from-pixels.md
@@ -22,7 +22,7 @@ Help improve this article by developing an example with reduced dependency
 footprint!
 ```

-This tutorial demonstrates how to implement a deep reinforcement learning (RL) agent from scratch using a policy gradient method that learns to play the [Pong](https://gym.openai.com/envs/Pong-v0/) video game using screen pixels as inputs with NumPy. Your Pong agent will obtain experience on the go using an [artificial neural network](https://en.wikipedia.org/wiki/Artificial_neural_network) as its [policy](https://en.wikipedia.org/wiki/Reinforcement_learning).
+This tutorial demonstrates how to implement a deep reinforcement learning (RL) agent from scratch using a policy gradient method that learns to play the [Pong](https://en.wikipedia.org/wiki/Pong) video game using screen pixels as inputs with NumPy. Your Pong agent will obtain experience on the go using an [artificial neural network](https://en.wikipedia.org/wiki/Artificial_neural_network) as its [policy](https://en.wikipedia.org/wiki/Reinforcement_learning).

 Pong is a 2D game from 1972 where two players use "rackets" to play a form of table tennis. Each player moves the racket up and down the screen and tries to hit a ball in their opponent's direction by touching it. The goal is to hit the ball such that it goes past the opponent's racket (they miss their shot). According to the rules, if a player reaches 21 points, they win. In Pong, the RL agent that learns to play against an opponent is displayed on the right.

@@ -32,7 +32,7 @@ This example is based on the [code](https://gist.github.com/karpathy/a4166c7fe25

 ## Prerequisites

-- **OpenAI Gym**: To help with the game environment, you will use [Gym](https://gym.openai.com) — an open-source Python interface [developed by OpenAI](https://arxiv.org/abs/1606.01540) that helps perform RL tasks while supporting many simulation environments.
+- **OpenAI Gym**: To help with the game environment, you will use [Gym](https://github.com/openai/gym) — an open-source Python interface [developed by OpenAI](https://arxiv.org/abs/1606.01540) that helps perform RL tasks while supporting many simulation environments.
 - **Python and NumPy**: The reader should have some knowledge of Python, NumPy array manipulation, and linear algebra.
 - **Deep learning and deep RL**: You should be familiar with main concepts of [deep learning](https://en.wikipedia.org/wiki/Deep_learning), which are explained in the [Deep learning](http://www.cs.toronto.edu/~hinton/absps/NatureDeepReview.pdf) paper published in 2015 by Yann LeCun, Yoshua Bengio, and Geoffrey Hinton, who are regarded as some of the pioneers of the field. The tutorial will try to guide you through the main concepts of deep RL and you will find various literature with links to original sources for your convenience.
 - **Jupyter notebook environments**: Because RL experiments can require high computing power, you can run the tutorial on the cloud for free using [Binder](https://mybinder.org) or [Google Colaboratory](https://colab.research.google.com/notebooks/intro.ipynb) (which offers free limited GPU and TPU acceleration).
diff --git a/content/tutorial-nlp-from-scratch.md b/content/tutorial-nlp-from-scratch.md
index 68a31d27..a4771883 100644
--- a/content/tutorial-nlp-from-scratch.md
+++ b/content/tutorial-nlp-from-scratch.md
@@ -31,7 +31,7 @@ Today, Deep Learning is getting adopted in everyday life and now it is more impo

 ## Prerequisites

-You are expected to be familiar with the Python programming language and array manipulation with NumPy. In addition, some understanding of Linear Algebra and Calculus is recommended. You should also be familiar with how Neural Networks work. For reference, you can visit the [Python](https://docs.python.org/dev/tutorial/index.html), [Linear algebra on n-dimensional arrays](https://numpy.org/doc/stable/user/tutorial-svd.html) and [Calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/multivariable-calculus.html) tutorials.
+You are expected to be familiar with the Python programming language and array manipulation with NumPy. In addition, some understanding of Linear Algebra and Calculus is recommended. You should also be familiar with how Neural Networks work. For reference, you can visit the [Python](https://docs.python.org/dev/tutorial/index.html), [Linear algebra on n-dimensional arrays](https://numpy.org/numpy-tutorials/content/tutorial-svd.html) and [Calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/multivariable-calculus.html) tutorials.

 To get a refresher on Deep Learning basics, You should consider reading [the d2l.ai book](https://d2l.ai/chapter_recurrent-neural-networks/index.html), which is an interactive deep learning book with multi-framework code, math, and discussions. You can also go through the [Deep learning on MNIST from scratch tutorial](https://numpy.org/numpy-tutorials/content/tutorial-deep-learning-on-mnist.html) to understand how a basic neural network is implemented from scratch.
@@ -107,8 +107,8 @@ We made sure to include different demographics in our data and included a range

 1. **Text Denoising** : Before converting your text into vectors, it is important to clean it and remove all unhelpful parts a.k.a the noise from your data by converting all characters to lowercase, removing html tags, brackets and stop words (words that don't add much meaning to a sentence).
 Without this step the dataset is often a cluster of words that the computer doesn't understand.
-2. **Converting words to vectors** : A word embedding is a learned representation for text where words that have the same meaning have a similar representation. Individual words are represented as real-valued vectors in a predefined vector space. GloVe is an unsupervised algorithm developed by Stanford for generating word embeddings by generating global word-word co-occurence matrix from a corpus. You can download the zipped files containing the embeddings from https://nlp.stanford.edu/projects/glove/. Here you can choose any of the four options for different sizes or training datasets. We have chosen the least memory consuming embedding file.
- >The GloVe word embeddings include sets that were trained on billions of tokens, some up to 840 billion tokens. These algorithms exhibit stereotypical biases, such as gender bias which can be traced back to the original training data. For example certain occupations seem to be more biased towards a particular gender, reinforcing problematic stereotypes. The nearest solution to this problem are some de-biasing algorithms as the one presented in https://web.stanford.edu/class/archive/cs/cs224n/cs224n.1184/reports/6835575.pdf which one can use on embeddings of their choice to mitigate bias, if present.
+2. **Converting words to vectors** : A word embedding is a learned representation for text where words that have the same meaning have a similar representation. Individual words are represented as real-valued vectors in a predefined vector space. GloVe is an unsupervised algorithm developed by Stanford for generating word embeddings by generating a global word-word co-occurrence matrix from a corpus. You can download the zipped files containing the embeddings from [the GloVe official website](https://nlp.stanford.edu/projects/glove/). Here you can choose any of the four options for different sizes or training datasets. We have chosen the least memory consuming embedding file.
+ >The GloVe word embeddings include sets that were trained on billions of tokens, some up to 840 billion tokens. These algorithms exhibit stereotypical biases, such as gender bias which can be traced back to the original training data. For example certain occupations seem to be more biased towards a particular gender, reinforcing problematic stereotypes. The nearest solution to this problem are some de-biasing algorithms such as the one presented in [this research article](https://web.stanford.edu/class/archive/cs/cs224n/cs224n.1184/reports/6835575.pdf), which one can use on embeddings of their choice to mitigate bias, if present.

 You'll start with importing the necessary packages to build our Deep Learning network.
@@ -441,7 +441,7 @@ emb_path = textproc.unzipper(glove, 'glove.6B.300d.txt')
 emb_matrix = textproc.loadGloveModel(emb_path)
 ```

-## 3. Build the Deep Learning Model¶
+## 3. Build the Deep Learning Model

 It is time to start implementing our LSTM! You will have to first familiarize yourself with some high-level concepts of the basic building blocks of a deep learning model. You can refer to the [Deep learning on MNIST from scratch tutorial](https://numpy.org/numpy-tutorials/content/tutorial-deep-learning-on-mnist.html) for the same. You will then learn how a Recurrent Neural Network differs from a plain Neural Network and what makes it so suitable for processing sequential data.
 Afterwards, you will construct the building blocks of a simple deep learning model in Python and NumPy and train it to learn to classify the sentiment of a piece of text as positive or negative with a certain level of accuracy
@@ -1049,11 +1049,11 @@ To further enhance and optimize your neural network model, you can consider one
 - Initialize weights using [Xavier Initialization](https://d2l.ai/chapter_multilayer-perceptrons/numerical-stability-and-init.html#xavier-initialization) to prevent vanishing/exploding gradients instead of initializing them randomly.
 - Replace LSTM with a [Bidirectional LSTM](https://en.wikipedia.org/wiki/Bidirectional_recurrent_neural_networks) to use both left and right context for predicting sentiment.

-Nowadays, LSTMs have been replaced by the [Transformer](https://jalammar.github.io/illustrated-transformer/)( which uses [Attention](https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/) to tackle all the problems that plague an LSTM such as as lack of [transfer learning](https://en.wikipedia.org/wiki/Transfer_learning), lack of [parallel training](https://web.stanford.edu/~rezab/classes/cme323/S16/projects_reports/hedge_usmani.pdf) and a long gradient chain for lengthy sequences
+Nowadays, LSTMs have been replaced by the [Transformer](https://jalammar.github.io/illustrated-transformer/) which uses [Attention](https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/) to tackle all the problems that plague an LSTM such as lack of [transfer learning](https://en.wikipedia.org/wiki/Transfer_learning), lack of [parallel training](https://web.stanford.edu/~rezab/classes/cme323/S16/projects_reports/hedge_usmani.pdf), and a long gradient chain for lengthy sequences.

-Building a neural network from scratch with NumPy is a great way to learn more about NumPy and about deep learning. However, for real-world applications you should use specialized frameworks — such as PyTorch, JAX, TensorFlow or MXNet — that provide NumPy-like APIs, have built-in automatic differentiation and GPU support, and are designed for high-performance numerical computing and machine learning.
+Building a neural network from scratch with NumPy is a great way to learn more about NumPy and about deep learning. However, for real-world applications you should use specialized frameworks — such as PyTorch, JAX or TensorFlow — that provide NumPy-like APIs, have built-in automatic differentiation and GPU support, and are designed for high-performance numerical computing and machine learning.

 Finally, to know more about how ethics come into play when developing a machine learning model, you can refer to the following resources :
-- Data ethics resources by the Turing Institute. https://www.turing.ac.uk/research/data-ethics
+- [Data ethics resources](https://www.turing.ac.uk/research/data-ethics) by the Turing Institute
 - Considering how artificial intelligence shifts power, an [article](https://www.nature.com/articles/d41586-020-02003-2) and [talk](https://slideslive.com/38923453/the-values-of-machine-learning) by Pratyusha Kalluri
 - More ethics resources on [this blog post](https://www.fast.ai/2018/09/24/ai-ethics-resources/) by Rachel Thomas and the [Radical AI podcast](https://www.radicalai.org/)
diff --git a/content/tutorial-static_equilibrium.md b/content/tutorial-static_equilibrium.md
index 0e8b82f7..f649e56e 100644
--- a/content/tutorial-static_equilibrium.md
+++ b/content/tutorial-static_equilibrium.md
@@ -263,7 +263,7 @@ print("Reaction moment =", M)
 ```

 ### Another Example
-Let's look at a slightly more complicated model. In this example you will be observing a beam with two cables and an applied force. This time you need to find both the tension in the cords and the reaction forces of the beam. *(Source: [Vector Mechanics for Engineers: Statics](https://www.mheducation.com/highered/product/vector-mechanics-engineers-statics-beer-johnston/M9780077687304.html), Problem 4.106)*
+Let's look at a slightly more complicated model. In this example you will be observing a beam with two cables and an applied force. This time you need to find both the tension in the cords and the reaction forces of the beam. *(Source: [Vector Mechanics for Engineers: Statics and Dynamics](https://www.mheducation.com/highered/product/Vector-Mechanics-for-Engineers-Statics-and-Dynamics-Beer.html), Problem 4.106)*

 ![image.png](_static/problem4.png)
@@ -387,5 +387,5 @@ This same process can be applied to kinetic problems or in any number of dimensi

 ### References

-1. [Vector Mechanics for Engineers: Statics (Beer & Johnston & Mazurek)](https://www.mheducation.com/highered/product/vector-mechanics-engineers-statics-beer-johnston/M9780077687304.html)
+1. [Vector Mechanics for Engineers: Statics and Dynamics (Beer, Johnston, Mazurek, et al.)](https://www.mheducation.com/highered/product/Vector-Mechanics-for-Engineers-Statics-and-Dynamics-Beer.html)
 2. [NumPy Reference](https://numpy.org/doc/stable/reference/)
diff --git a/content/tutorial-svd.md b/content/tutorial-svd.md
index 3798636a..aded8df0 100644
--- a/content/tutorial-svd.md
+++ b/content/tutorial-svd.md
@@ -47,7 +47,7 @@ except ImportError:  # Data was in scipy.misc prior to scipy v1.10
     img = face()
 ```

-**Note**: If you prefer, you can use your own image as you work through this tutorial. In order to transform your image into a NumPy array that can be manipulated, you can use the `imread` function from the [matplotlib.pyplot](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.html#module-matplotlib.pyplot) submodule. Alternatively, you can use the [imageio.imread](https://imageio.readthedocs.io/en/stable/userapi.html#imageio.imread) function from the `imageio` library. Be aware that if you use your own image, you'll likely need to adapt the steps below. For more information on how images are treated when converted to NumPy arrays, see [A crash course on NumPy for images](https://scikit-image.org/docs/stable/user_guide/numpy_images.html) from the `scikit-image` documentation.
+**Note**: If you prefer, you can use your own image as you work through this tutorial. In order to transform your image into a NumPy array that can be manipulated, you can use the `imread` function from the [matplotlib.pyplot](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.html#module-matplotlib.pyplot) submodule. Alternatively, you can use the [imageio.imread](https://imageio.readthedocs.io/en/stable/_autosummary/imageio.v3.imread.html) function from the `imageio` library. Be aware that if you use your own image, you'll likely need to adapt the steps below. For more information on how images are treated when converted to NumPy arrays, see [A crash course on NumPy for images](https://scikit-image.org/docs/stable/user_guide/numpy_images.html) from the `scikit-image` documentation.

 +++

@@ -154,7 +154,7 @@ $$U \Sigma V^T = A$$

 where $U$ and $V^T$ are square and $\Sigma$ is the same size as $A$. $\Sigma$ is a diagonal matrix and contains the [singular values](https://en.wikipedia.org/wiki/Singular_value) of $A$, organized from largest to smallest. These values are always non-negative and can be used as an indicator of the "importance" of some features represented by the matrix $A$.

-Let's see how this works in practice with just one matrix first. Note that according to [colorimetry](https://en.wikipedia.org/wiki/Grayscale#Colorimetric_(perceptual_luminance-reserving)_conversion_to_grayscale),
+Let's see how this works in practice with just one matrix first. Note that according to [colorimetry](https://en.wikipedia.org/wiki/Grayscale#Colorimetric_(perceptual_luminance-preserving)_conversion_to_grayscale),
 it is possible to obtain a fairly reasonable grayscale version of our color image if we apply the formula

 $$Y = 0.2126 R + 0.7152 G + 0.0722 B$$
diff --git a/content/x_y-squared.csv b/content/x_y-squared.csv
deleted file mode 100644
index e74126ff..00000000
--- a/content/x_y-squared.csv
+++ /dev/null
@@ -1,11 +0,0 @@
-# x, y
-0.000000000000000000e+00,0.000000000000000000e+00
-1.000000000000000000e+00,1.000000000000000000e+00
-2.000000000000000000e+00,4.000000000000000000e+00
-3.000000000000000000e+00,9.000000000000000000e+00
-4.000000000000000000e+00,1.600000000000000000e+01
-5.000000000000000000e+00,2.500000000000000000e+01
-6.000000000000000000e+00,3.600000000000000000e+01
-7.000000000000000000e+00,4.900000000000000000e+01
-8.000000000000000000e+00,6.400000000000000000e+01
-9.000000000000000000e+00,8.100000000000000000e+01
diff --git a/content/x_y-squared.npz b/content/x_y-squared.npz
deleted file mode 100644
index 6c32f196..00000000
Binary files a/content/x_y-squared.npz and /dev/null differ
diff --git a/environment.yml b/environment.yml
index 509d1bc7..15dd1085 100644
--- a/environment.yml
+++ b/environment.yml
@@ -14,3 +14,5 @@ dependencies:
   - myst-nb
   - sphinx-book-theme
   - sphinx-copybutton
+  # to load the md files in binder
+  - jupytext
diff --git a/site/conf.py b/site/conf.py
index e1fe5b63..4aea25ca 100644
--- a/site/conf.py
+++ b/site/conf.py
@@ -65,7 +65,7 @@
     "repository_branch": "main",
     "use_repository_button": True,
     "use_issues_button": True,
-    "use_edit_page_button": True,
+    "use_edit_page_button": False,
     "path_to_docs": "site/",
     "launch_buttons": {
         "binderhub_url": "https://mybinder.org",
diff --git a/site/contributing.md b/site/contributing.md
index c35db184..e95774a5 100644
--- a/site/contributing.md
+++ b/site/contributing.md
@@ -42,7 +42,7 @@ You may notice our content is in markdown format (`.md` files). We review and
 host notebooks in the [MyST-NB](https://myst-nb.readthedocs.io/) format.
 We accept both Jupyter notebooks (`.ipynb`) and MyST-NB notebooks (`.md`). If
 you want to sync your `.ipynb` to your `.md` file follow the [pairing
-tutorial](content/pairing.md).
+tutorial](content/pairing).

 ```{toctree}
 :hidden:
diff --git a/tox.ini b/tox.ini
index 4c3157e3..ff48327d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -28,7 +28,7 @@ deps =
     devdeps: matplotlib>=0.0.dev0
    devdeps: pandas>=0.0.dev0

-allowlist_externals = bash
+allowlist_externals = bash, make

 commands =
     # Force numpy reinstall to work around upper version limits in downstream dependencies (e.g. pandas)
