Reusing intensive computation

Contributions (4)

My solution 😄

package main

import (
	"fmt"
	"sync"
	"time"
)

func ExpensiveFibonacci(n interface{}) int {
	switch n := n.(type) {
	case int:
		if n <= 1 {
			return n
		}
		return ExpensiveFibonacci(n-1) + ExpensiveFibonacci(n-2)
	default:
		return 0
	}
}

// Memory holds a function and a map of results
type Memory struct {
	f     Function                       // Function to be used
	cache map[interface{}]FunctionResult // Map of results for a given key
	lock  sync.RWMutex                   // Lock to protect the cache

	InProgress map[interface{}]bool                  // Map of jobs in progress
	IsPending  map[interface{}][]chan FunctionResult // Map of jobs waiting for a response
}

// A Function receives a key and returns a value and an error
type Function func(key interface{}) (interface{}, error)

// The result of a function
type FunctionResult struct {
	value interface{}
	err   error
}

// NewCache creates a new cache
func NewCache(f Function) *Memory {
	return &Memory{
		f:          f,
		cache:      make(map[interface{}]FunctionResult),
		lock:       sync.RWMutex{},
		InProgress: make(map[interface{}]bool),
		IsPending:  make(map[interface{}][]chan FunctionResult),
	}
}

func (m *Memory) service(key interface{}) (interface{}, error) {
	// Check if the job is already in progress
	m.lock.RLock()
	inProgress := m.InProgress[key] // check the stored value, not just key existence
	m.lock.RUnlock()

	// If the job is already in progress, wait for its result
	if inProgress {
		response := make(chan FunctionResult)
		defer close(response)

		// Add the channel to the pending list
		m.lock.Lock()
		m.IsPending[key] = append(m.IsPending[key], response)
		m.lock.Unlock()

		// Wait for the response
		res := <-response
		return res.value, res.err
	}

	// If the job is not in progress, then start the job
	m.lock.Lock()
	m.InProgress[key] = true
	m.lock.Unlock()

	// Start the job
	fmt.Printf("Starting job %d\n", key)
	fnresult, err := m.f(key)
	res := FunctionResult{value: fnresult, err: err}
	if err != nil {
		fmt.Printf("Error: %v\n", err)
	}

	// Get the pending workers for this job
	m.lock.RLock()
	pendingWorkers, exist := m.IsPending[key]
	m.lock.RUnlock()

	// If there are pending workers, then send the response
	if exist {
		for _, worker := range pendingWorkers {
			worker <- res
		}
	}

	// Cache the result first, then reset the in-progress state, all under one
	// lock, so no other goroutine can observe "not in progress" before the
	// value is available in the cache
	m.lock.Lock()
	m.cache[key] = res
	m.InProgress[key] = false
	m.IsPending[key] = make([]chan FunctionResult, 0)
	m.lock.Unlock()

	fmt.Printf("Finished job %d, got %d\n", key, res)
	return res.value, res.err

}

// Get returns the value for a given key
func (m *Memory) Get(key interface{}) (interface{}, error) {

	// Lock the cache
	m.lock.RLock()

	// Check if the value is in the cache
	res, exist := m.cache[key]

	// Unlock the cache
	m.lock.RUnlock()

	// If the value is in the cache, return it
	if exist {
		return res.value, res.err
	}

	// If the value is not in the cache, then start the service
	res.value, res.err = m.service(key)
	return res.value, res.err

}

// Function to be used in the cache
func GetFibonacci(key interface{}) (interface{}, error) {
	return ExpensiveFibonacci(key), nil
}

func main() {
	empezar := time.Now()
	// Create a cache and some values
	cache := NewCache(GetFibonacci)
	values := []int{46, 46, 42, 42, 41, 41, 46, 46, 46, 42, 42, 46}

	var wg sync.WaitGroup

	// For each value to calculate, get the value and print the time it took to calculate
	for _, v := range values {
		wg.Add(1) // register the goroutine before launching it
		go func(v int) {
			defer wg.Done()

			start := time.Now()

			res, err := cache.Get(v)
			if err != nil {
				fmt.Printf("Error: %v\n", err)
			}

			fmt.Printf("%v:%d took %v\n", v, res, time.Since(start))
		}(v)
	}
	wg.Wait()

	// This is to prove that cache is working
	fmt.Println()
	fmt.Println("Doing it all again!")
	for _, v := range values {
		wg.Add(1) // register the goroutine before launching it
		go func(v int) {
			defer wg.Done()

			start := time.Now()

			res, err := cache.Get(v)
			if err != nil {
				fmt.Printf("Error: %v\n", err)
			}

			fmt.Printf("%v:%d took %v\n", v, res, time.Since(start))
		}(v)
	}

	wg.Wait()

	fmt.Printf("Total time: %v\n", time.Since(empezar))
}

Sharing my solution to the challenge.
A concurrent cache system in Go: https://gist.github.com/UnPolinomio/5098c4146e97bd3c3aedc26e791e8785
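
The gist itself is not reproduced in this comment. For comparison, here is a minimal sketch of the same "compute once, share the result" idea using golang.org/x/sync/singleflight together with a mutex-protected map; this is only an illustration under my own assumptions, not necessarily how the linked gist is implemented.

package main

import (
	"fmt"
	"strconv"
	"sync"

	"golang.org/x/sync/singleflight"
)

// Fibonacci is the deliberately expensive computation.
func Fibonacci(n int) int {
	if n <= 1 {
		return n
	}
	return Fibonacci(n-1) + Fibonacci(n-2)
}

func main() {
	var (
		group singleflight.Group  // deduplicates concurrent calls for the same key
		mu    sync.Mutex          // protects the cache map
		cache = make(map[int]int) // results of finished computations
		wg    sync.WaitGroup
	)

	for _, n := range []int{42, 42, 40, 40, 42} {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()

			// Serve from the cache when the value is already known.
			mu.Lock()
			if v, ok := cache[n]; ok {
				mu.Unlock()
				fmt.Printf("%d -> %d (cached)\n", n, v)
				return
			}
			mu.Unlock()

			// Otherwise compute at most once per key, even under concurrency;
			// other goroutines asking for the same key wait for this result.
			v, _, _ := group.Do(strconv.Itoa(n), func() (interface{}, error) {
				res := Fibonacci(n)
				mu.Lock()
				cache[n] = res
				mu.Unlock()
				return res, nil
			})
			fmt.Printf("%d -> %d\n", n, v)
		}(n)
	}
	wg.Wait()
}

The singleflight group plays the same role as the InProgress/IsPending maps in the solutions above: it blocks duplicate callers and hands them the single in-flight result.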

Sharing the solution I developed for this challenge at this link. I built an in-memory cache system that is persisted to the filesystem; when the application restarts, the cache is loaded back into memory (a feature inspired by Redis).
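
The linked project is not shown in the comment, so here is only a rough sketch of the persistence idea it describes: an in-memory map that is written to a JSON file on every update and reloaded at startup. The file name, key format, and types below are assumptions made for illustration, not taken from the actual solution.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"sync"
)

// PersistentCache is a hypothetical in-memory cache that can be saved to
// and restored from a JSON file.
type PersistentCache struct {
	mu   sync.RWMutex
	data map[string]int
	path string // file used for persistence (assumed name)
}

// NewPersistentCache loads any previously saved cache from disk.
func NewPersistentCache(path string) *PersistentCache {
	c := &PersistentCache{data: make(map[string]int), path: path}
	if raw, err := os.ReadFile(path); err == nil {
		_ = json.Unmarshal(raw, &c.data) // best effort: ignore a missing or corrupt file
	}
	return c
}

// Get returns the cached value for key, if present.
func (c *PersistentCache) Get(key string) (int, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.data[key]
	return v, ok
}

// Set stores the value and rewrites the whole file; fine for a small demo cache.
func (c *PersistentCache) Set(key string, value int) error {
	c.mu.Lock()
	c.data[key] = value
	raw, err := json.Marshal(c.data)
	c.mu.Unlock()
	if err != nil {
		return err
	}
	return os.WriteFile(c.path, raw, 0o644)
}

func main() {
	cache := NewPersistentCache("cache.json")
	if v, ok := cache.Get("fib:40"); ok {
		fmt.Println("loaded from a previous run:", v)
	}
	_ = cache.Set("fib:40", 102334155)
}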

My solution to the challenge: the function that computes Fibonacci runs only as many times as necessary, and every previously computed number is kept in the cache. This considerably improves performance, since it avoids spending CPU on recomputing Fibonacci for repeated numbers.

package main

import (
	"fmt"
	"log"
	"sync"
	"time"
)

func Fibonacci(n int) int {
	if n <= 1 {
		return n
	}
	return Fibonacci(n-1) + Fibonacci(n-2)
}

type Memory struct {
	f          Function
	InProgress map[int]bool
	IsPending  map[int][]chan int
	cache      map[int]FunctionResult
	lock       sync.RWMutex
}

type Function func(key int) (int, error)

type FunctionResult struct {
	value interface{}
	err   error
}

func NewCache(f Function) *Memory {
	return &Memory{
		f:          f,
		InProgress: make(map[int]bool),
		IsPending:  make(map[int][]chan int),
		cache:      make(map[int]FunctionResult),
	}
}

func (m *Memory) Get(key int) (interface{}, error) {
	m.lock.Lock()
	result, exists := m.cache[key]
	m.lock.Unlock()
	if !exists {
		result.value, result.err = m.Work(key)
		m.lock.Lock()
		m.cache[key] = result
		m.lock.Unlock()
	}
	return result.value, result.err
}

func (m *Memory) Work(job int) (int, error) {
	m.lock.RLock()
	exists := m.InProgress[job]
	if exists {
		m.lock.RUnlock()
		response := make(chan int)
		defer close(response)

		m.lock.Lock()
		m.IsPending[job] = append(m.IsPending[job], response)
		m.lock.Unlock()
		resp := <-response
		return resp, nil
	}
	m.lock.RUnlock()

	m.lock.Lock()
	m.InProgress[job] = true
	m.lock.Unlock()

	result, err := m.f(job) // run the expensive function exactly once for this key

	m.lock.RLock()
	pendingWorkers, exists := m.IsPending[job]
	m.lock.RUnlock()

	if exists {
		for _, pendingWorker := range pendingWorkers {
			pendingWorker <- result
		}
	}
	m.lock.Lock()
	m.InProgress[job] = false
	m.IsPending[job] = make([]chan int, 0)
	m.lock.Unlock()
	return result, err
}

func GetFibonacci(n int) (int, error) {
	fmt.Println("Se ejecuto GetFibonacci")
	return Fibonacci(n), nil
}

func main() {
	timeInit := time.Now()
	cache := NewCache(GetFibonacci)
	fibo := []int{42, 40, 41, 42, 38, 40, 40, 41, 43, 43, 43}
	var wg sync.WaitGroup
	for _, n := range fibo {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			start := time.Now()
			value, err := cache.Get(index)
			if err != nil {
				log.Println(err)
			}
			fmt.Printf("%d, %s, %d\n", index, time.Since(start), value)
		}(n)
	}
	wg.Wait()
	fmt.Printf("tiempo final %s\n", time.Since(timeInit))
}